Reading the source of TensorRT/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h and caffeWeightFactory.cpp, part 1

Preface

The two files TensorRT/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h and TensorRT/parsers/caffe/caffeWeightFactory/caffeWeightFactory.cpp define the CaffeWeightFactory class.

It is used to retrieve, type-convert, or randomly generate the weights of a neural network (nvinfer1::Weights). It also keeps several pieces of metadata, including whether the factory has been initialized (mInitialized), whether an error occurred while converting or retrieving weights (mOK), and the data type of the weights (mDataType).

Due to space constraints, this article first covers the constructor, the functions built around the private member variables, and the two utility functions checkForNans and sizeOfCaffeType.

Follow-up articles will cover the functions that convert weight types, the functions that randomly generate weights, the functions related to trtcaffe::BlobProto, and the functions that retrieve weights.

TensorRT/parsers/caffe/caffeWeightFactory/caffeWeightFactory.h

/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef TRT_CAFFE_PARSER_CAFFE_WEIGHT_FACTORY_H
#define TRT_CAFFE_PARSER_CAFFE_WEIGHT_FACTORY_H

#include <vector>
#include <string>
#include <random>
#include <memory>
#include "NvInfer.h"
#include "weightType.h"
#include "trtcaffe.pb.h"

namespace nvcaffeparser1
{
//A factory that produces nvinfer1::Weights?
//This class retrieves, type-converts, or randomly generates the weights (nvinfer1::Weights) of a neural network
class CaffeWeightFactory
{
public:
    CaffeWeightFactory(const trtcaffe::NetParameter& msg, nvinfer1::DataType dataType, std::vector<void*>& tmpAllocs, bool isInitialized);
    nvinfer1::DataType getDataType() const;
    size_t getDataTypeSize() const;
    std::vector<void*>& getTmpAllocs();
    int getBlobsSize(const std::string& layerName);
    const trtcaffe::BlobProto* getBlob(const std::string& layerName, int index);
    std::vector<nvinfer1::Weights> getAllWeights(const std::string& layerName);
    virtual nvinfer1::Weights operator()(const std::string& layerName, WeightType weightType);
    void convert(nvinfer1::Weights& weights, nvinfer1::DataType targetType);
    void convert(nvinfer1::Weights& weights);
    bool isOK();
    bool isInitialized();
    /*
    class Weights is defined in include/NvInferRuntime.h:
    class Weights
    {
    public:
        DataType type;      //!< The type of the weights.
        const void* values; //!< The weight values, in a contiguous array.
        int64_t count;      //!< The number of weights in the array.
    };
    It represents the weights of each layer in the neural network.
    */
    nvinfer1::Weights getNullWeights();
    nvinfer1::Weights allocateWeights(int64_t elems, std::uniform_real_distribution<float> distribution = std::uniform_real_distribution<float>(-0.01f, 0.01F));
    nvinfer1::Weights allocateWeights(int64_t elems, std::normal_distribution<float> distribution);
    static trtcaffe::Type getBlobProtoDataType(const trtcaffe::BlobProto& blobMsg);
    static size_t sizeOfCaffeType(trtcaffe::Type type);
    // The size returned here is the number of array entries, not bytes
    static std::pair<const void*, size_t> getBlobProtoData(const  trtcaffe::BlobProto& blobMsg, trtcaffe::Type type, std::vector<void*>& tmpAllocs);

private:
    template <typename T>
    bool checkForNans(const void* values, int count, const std::string& layerName);
    nvinfer1::Weights getWeights(const trtcaffe::BlobProto& blobMsg, const std::string& layerName);

    //holds the network description, e.g. how many layers there are and what each layer is
    const trtcaffe::NetParameter& mMsg;
    std::unique_ptr<trtcaffe::NetParameter> mRef;
    //buffers allocated while converting or allocating weights are recorded here
    std::vector<void*>& mTmpAllocs;
    //data type of the weights
    nvinfer1::DataType mDataType;
    // bool mQuantize;
    bool mInitialized;
    //random number generator used to randomly generate weights
    std::default_random_engine generator;
    //indicates whether an error occurred while converting or retrieving weights
    bool mOK{true};
};
} //namespace nvcaffeparser1
#endif //TRT_CAFFE_PARSER_CAFFE_WEIGHT_FACTORY_H
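
Before moving on to the .cpp file, here is a minimal usage sketch built from the declarations above. It is illustrative only: it assumes a trtcaffe::NetParameter (msg) has already been parsed from a .caffemodel, that the network contains a layer named "conv1", and it uses the WeightType enumerators kGENERIC/kBIAS declared in weightType.h.

#include <vector>

#include "NvInfer.h"
#include "caffeWeightFactory.h"

void extractConvWeights(const trtcaffe::NetParameter& msg)
{
    //buffers allocated during weight conversion are collected here; the real
    //CaffeParser keeps them alive until the TensorRT network has been built
    std::vector<void*> tmpAllocs;

    //isInitialized == true means real weights are available; with false the
    //factory would generate random weights instead
    nvcaffeparser1::CaffeWeightFactory factory(msg, nvinfer1::DataType::kFLOAT, tmpAllocs, true);

    //fetch the kernel and bias weights of the hypothetical layer "conv1"
    nvinfer1::Weights kernel = factory("conv1", nvcaffeparser1::WeightType::kGENERIC);
    nvinfer1::Weights bias = factory("conv1", nvcaffeparser1::WeightType::kBIAS);
    (void) kernel;
    (void) bias;

    if (!factory.isOK())
    {
        //a NaN or an invalid blob was encountered while extracting the weights
    }
}

In the TensorRT sources, the analogous calls are made by the per-layer op parsers under parsers/caffe/caffeParser/opParsers.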

TensorRT/parsers/caffe/caffeWeightFactory/caffeWeightFactory.cpp

/*
 * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "caffeMacros.h"
#include "caffeWeightFactory.h"
#include "half.h"

using namespace nvinfer1;
using namespace nvcaffeparser1;


//...


CaffeWeightFactory::CaffeWeightFactory(const trtcaffe::NetParameter& msg, DataType dataType, std::vector<void*>& tmpAllocs, bool isInitialized)
    : mMsg(msg)
    , mTmpAllocs(tmpAllocs)
    , mDataType(dataType)
    , mInitialized(isInitialized)
{
    mRef = std::unique_ptr<trtcaffe::NetParameter>(new trtcaffe::NetParameter);
}

//returns the member variable mDataType that was set at construction time
DataType CaffeWeightFactory::getDataType() const
{
    return mDataType;
}

//returns the number of bytes occupied by the weight data type
size_t CaffeWeightFactory::getDataTypeSize() const
{
    switch (getDataType())
    {
    case DataType::kFLOAT:
    case DataType::kINT32:
        return 4;
    case DataType::kHALF:
        return 2;
    case DataType::kINT8:
        return 1;
    }
    return 0;
}
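
As a quick illustration (a hypothetical helper, not part of the parser), this size would typically be multiplied by a weight count when sizing a raw buffer:

#include <cstddef>
#include <cstdint>

#include "caffeWeightFactory.h"

//hypothetical helper: number of bytes needed to store count weights of the
//factory's data type
size_t weightBufferBytes(const nvcaffeparser1::CaffeWeightFactory& factory, int64_t count)
{
    return static_cast<size_t>(count) * factory.getDataTypeSize();
}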

//returns the member variable mTmpAllocs that was set at construction time
std::vector<void*>& CaffeWeightFactory::getTmpAllocs()
{
    return mTmpAllocs;
}

bool CaffeWeightFactory::isOK()
{
    return mOK;
}

bool CaffeWeightFactory::isInitialized()
{
    return mInitialized;
}

/*
Checks whether the values, interpreted as type T and converted to float, contain any NaN.
Returns true if no NaN is found, false otherwise.
*/
template <typename T>
bool CaffeWeightFactory::checkForNans(const void* values, int count, const std::string& layerName)
{
    //interpret the raw buffer as values of type T
    const T* v = reinterpret_cast<const T*>(values);
    for (int i = 0; i < count; i++)
    {
        if (std::isnan(float(v[i])))
        {
            std::cout << layerName << ": Nan detected in weights" << std::endl;
            return false;
        }
    }
    return true;
}
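
Since checkForNans is a private member, here is a standalone sketch of the same NaN-scanning pattern that can be compiled on its own (the function name containsNoNans and the layer names are illustrative, not from the sources):

#include <cmath>
#include <iostream>
#include <limits>
#include <string>

//returns true when no NaN is found among the count values of type T
template <typename T>
bool containsNoNans(const void* values, int count, const std::string& layerName)
{
    const T* v = reinterpret_cast<const T*>(values);
    for (int i = 0; i < count; i++)
    {
        if (std::isnan(float(v[i])))
        {
            std::cout << layerName << ": Nan detected in weights" << std::endl;
            return false;
        }
    }
    return true;
}

int main()
{
    const float good[] = {0.5f, -1.25f, 3.0f};
    const float bad[] = {0.5f, std::numeric_limits<float>::quiet_NaN()};

    std::cout << std::boolalpha;
    std::cout << containsNoNans<float>(good, 3, "conv1") << std::endl; //true
    std::cout << containsNoNans<float>(bad, 2, "conv2") << std::endl;  //prints the warning, then false
    return 0;
}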

/*
trtcaffe::Type
defined in TensorRT/parsers/caffe/proto/trtcaffe.proto
// Math and storage types
enum Type {
  DOUBLE = 0;
  FLOAT = 1;
  FLOAT16 = 2;
  INT = 3;  // math not supported
  UINT = 4;  // math not supported
}
*/
//returns the number of bytes occupied by a Caffe data type
size_t CaffeWeightFactory::sizeOfCaffeType(trtcaffe::Type type)
{
    //single and half precision are handled explicitly; everything else (DOUBLE, and the unsupported INT/UINT) falls through to sizeof(double) below
    if (type == trtcaffe::FLOAT)
    {
        return sizeof(float);
    }
    if (type == trtcaffe::FLOAT16)
    {
        //uint16_t is an unsigned integer of exactly 16 bits, i.e. 2 bytes; FLOAT16 data is stored as raw 16-bit values
        return sizeof(uint16_t);
    }
    return sizeof(double);
}

std::isnan

The CaffeWeightFactory::checkForNans function uses std::isnan; for more on std::isnan, see C++ std::isnan and std::isinf.
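
A tiny, self-contained demo of what std::isnan (and its companion std::isinf) report, with the special values produced via std::numeric_limits:

#include <cmath>
#include <iostream>
#include <limits>

int main()
{
    const float nanValue = std::numeric_limits<float>::quiet_NaN();
    const float infValue = std::numeric_limits<float>::infinity();

    std::cout << std::boolalpha;
    std::cout << std::isnan(nanValue) << std::endl; //true
    std::cout << std::isinf(infValue) << std::endl; //true
    std::cout << std::isnan(1.5f) << std::endl;     //false
    return 0;
}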

uint16_t and int64_t

The sizeOfCaffeType function uses the type uint16_t, and the allocateWeights functions use int64_t. For more on these fixed-width _t types, see C uint8_t,int64_t.
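
The key property of these types is their guaranteed width; a minimal snippet verifying it at compile time:

#include <cstdint>

//<cstdint> guarantees exact widths regardless of the platform's int/long sizes
static_assert(sizeof(uint16_t) == 2, "uint16_t occupies exactly 2 bytes");
static_assert(sizeof(int64_t) == 8, "int64_t occupies exactly 8 bytes");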

References

C++ std::isnan and std::isinf

C uint8_t,int64_t
