Caffe source code walkthrough (13): blob.hpp

  Caffe uses a four-dimensional array called a Blob to store and exchange data. A Blob provides a unified memory interface and holds a batch of images (or other data), weights, and weight updates. In memory a Blob is a 4-D array whose dimensions, from the fastest-varying to the slowest, are [width_, height_, channels_, num_]: width_ and height_ are the image width and height, channels_ is the number of color channels (e.g. 3 for RGB), and num_ indexes the sample within the batch. A Blob stores either data/weights (data) or weight increments (diff), and during network computation every layer's input and output is buffered through Blob objects. Blob is Caffe's basic storage unit. Below is a brief introduction, largely based on the detailed discussion of Blob in Yongke Zhao's book 《深度学习——21天实战Caffe》 (Deep Learning: 21 Days of Hands-on Caffe).
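  To make that memory layout concrete, here is a minimal standalone sketch (not Caffe code; the shape and indices are made up for illustration) of how a position (num, channel, height, width) maps to a linear index, with width varying fastest:

#include <cstdio>

int main() {
  // Illustrative shape: num = 2, channels = 3, height = 4, width = 5.
  const int N = 2, C = 3, H = 4, W = 5;
  const int n = 1, c = 2, h = 3, w = 4;
  // Row-major NCHW indexing: w varies fastest, n slowest.
  const int index = ((n * C + c) * H + h) * W + w;
  std::printf("linear index %d out of %d elements\n", index, N * C * H * W);
  return 0;
}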
  Data structure description
  Open src/caffe/proto/caffe.proto and the first thing you see is the Blob-related message definitions, which shows how important this data structure is: most of the other data structures depend on it.

// This message describes the shape of a Blob
message BlobShape {
  repeated int64 dim = 1 [packed = true];
  // Contains only int64 values, one per Blob axis, giving the size of
  // each dimension; "packed" means the values are stored contiguously
}
// This message describes a Blob as serialized to disk
message BlobProto {
  // Optional: a BlobShape object
  optional BlobShape shape = 7;
  // A number of float elements storing data or weights.
  // The element count is determined by shape or by (num, channels, height, width);
  // the elements are stored contiguously
  repeated float data = 5 [packed = true];
  repeated float diff = 6 [packed = true];
  repeated double double_data = 8 [packed = true];  // same as data, but of type double
  repeated double double_diff = 9 [packed = true];  // same as diff, but of type double

  // 4D dimensions -- deprecated.  Use "shape" instead.
  // The following optional dimension fields are kept for compatibility;
  // newer versions of Caffe recommend using shape instead
  optional int32 num = 1 [default = 0];
  optional int32 channels = 2 [default = 0];
  optional int32 height = 3 [default = 0];
  optional int32 width = 4 [default = 0];
}
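As a quick illustration of the C++ API that protoc generates from these messages (a minimal sketch; the 1x3x2x2 shape and the constant 0.5 are made up for illustration), a BlobProto can be filled and inspected like this:

#include <iostream>
#include "caffe/proto/caffe.pb.h"

int main() {
  caffe::BlobProto proto;
  // Describe a 1x3x2x2 blob via the repeated "dim" field of BlobShape.
  caffe::BlobShape* shape = proto.mutable_shape();
  shape->add_dim(1);
  shape->add_dim(3);
  shape->add_dim(2);
  shape->add_dim(2);
  // Fill the repeated "data" field with a placeholder value.
  for (int i = 0; i < 1 * 3 * 2 * 2; ++i) {
    proto.add_data(0.5f);
  }
  std::cout << "axes: " << proto.shape().dim_size()
            << ", elements: " << proto.data_size() << std::endl;
  return 0;
}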

How a Blob is built
Blob is a template class declared in include/caffe/blob.hpp; it wraps SyncedMemory.

#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_
#include <algorithm>
#include <string>
#include <vector>
#include "caffe/common.hpp"
//Header generated by protoc from caffe.proto
#include "caffe/proto/caffe.pb.h"
//SyncedMemory class that keeps data synchronized between CPU and GPU
#include "caffe/syncedmem.hpp"
//Maximum number of axes a Blob may have
const int kMaxBlobAxes = 32;
namespace caffe {
template <typename Dtype>
class Blob {//class declaration
 public:
 //Default constructor
  Blob()
       : data_(), diff_(), count_(0), capacity_(0) {}

  //Explicit constructors; "explicit" prevents implicit type conversions
  explicit Blob(const int num, const int channels, const int height,
      const int width);
  explicit Blob(const vector<int>& shape);

  //Reshape functions: reset the blob's shape according to the arguments,
  //reallocating memory when necessary
  void Reshape(const int num, const int channels, const int height,
      const int width);
  void Reshape(const vector<int>& shape);
  void Reshape(const BlobShape& shape);
  void ReshapeLike(const Blob& other);
  //Return the blob shape as a string, used for logging; see Caffe's run log
  //for lines like "Top shape: 100 1 28 28 (78400)"
  inline string shape_string() const {
    ostringstream stream;
    for (int i = 0; i < shape_.size(); ++i) {
      stream << shape_[i] << " ";
    }
    stream << "(" << count_ << ")";
    return stream.str();
  }
  //Return the blob shape
  inline const vector<int>& shape() const { return shape_; }
  //Return the size of a given axis
  inline int shape(int index) const {
    return shape_[CanonicalAxisIndex(index)];
  }
  //Return the number of axes
  inline int num_axes() const { return shape_.size(); }
  //Return the total number of elements in the blob
  inline int count() const { return count_; }
 //Return the number of elements over the axis range [start_axis, end_axis)
  inline int count(int start_axis, int end_axis) const {
    CHECK_LE(start_axis, end_axis);  //ensure start_axis <= end_axis
    CHECK_GE(start_axis, 0);         //ensure start_axis >= 0
    CHECK_GE(end_axis, 0);           //ensure end_axis >= 0
    CHECK_LE(start_axis, num_axes());  //ensure start_axis <= number of axes
    CHECK_LE(end_axis, num_axes());    //ensure end_axis <= number of axes
    int count = 1;
    for (int i = start_axis; i < end_axis; ++i) {
      count *= shape(i);
    }
    return count;
  }
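  //Worked example (illustrative shape): for a blob of shape (2, 3, 4, 5),
  //count(1, 3) = 3 * 4 = 12, and count(0, 4) equals count() = 120.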
  //Return the number of elements from the given axis to the last axis
  inline int count(int start_axis) const {
    return count(start_axis, num_axes());
  }
  //Convert an axis index in the range [-N, N) to a canonical index in [0, N)
  inline int CanonicalAxisIndex(int axis_index) const {
    CHECK_GE(axis_index, -num_axes())  //ensure axis_index >= -num_axes()
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    CHECK_LT(axis_index, num_axes())  //ensure axis_index < num_axes()
        << "axis " << axis_index << " out of range for " << num_axes()
        << "-D Blob with shape " << shape_string();
    if (axis_index < 0) {
      return axis_index + num_axes();
    }
    return axis_index;
  }
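  //Worked example: for a 4-D blob, CanonicalAxisIndex(-1) returns 3 and
  //CanonicalAxisIndex(1) returns 1, i.e. negative indices count from the end.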
  //Legacy accessors for the sizes of the four canonical axes (num, channels, height, width)
  inline int num() const { return LegacyShape(0); }
  inline int channels() const { return LegacyShape(1); }
  inline int height() const { return LegacyShape(2); }
  inline int width() const { return LegacyShape(3); }
  inline int LegacyShape(int index) const {
    CHECK_LE(num_axes(), 4)
        << "Cannot use legacy accessors on Blobs with > 4 axes.";
    CHECK_LT(index, 4);
    CHECK_GE(index, -4);
    if (index >= num_axes() || index < -num_axes()) {
      return 1;
    }
    return shape(index);
  }
//The following functions compute the linear offset of an element in memory
  inline int offset(const int n, const int c = 0, const int h = 0,
      const int w = 0) const {
    CHECK_GE(n, 0);
    CHECK_LE(n, num());
    CHECK_GE(channels(), 0);
    CHECK_LE(c, channels());
    CHECK_GE(height(), 0);
    CHECK_LE(h, height());
    CHECK_GE(width(), 0);
    CHECK_LE(w, width());
    return ((n * channels() + c) * height() + h) * width() + w;
  }
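  //Worked example (illustrative shape): for a blob of shape (2, 3, 4, 5),
  //offset(1, 2, 3, 4) = ((1 * 3 + 2) * 4 + 3) * 5 + 4 = 119, the last element.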

  inline int offset(const vector<int>& indices) const {
    CHECK_LE(indices.size(), num_axes());
    int offset = 0;
    for (int i = 0; i < num_axes(); ++i) {
      offset *= shape(i);
      if (indices.size() > i) {
        CHECK_GE(indices[i], 0);
        CHECK_LT(indices[i], shape(i));
        offset += indices[i];
      }
    }
    return offset;
  }
  //Copy another Blob's values into this Blob
  void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
      bool reshape = false);

  inline Dtype data_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_data()[offset(n, c, h, w)];
  }

  inline Dtype diff_at(const int n, const int c, const int h,
      const int w) const {
    return cpu_diff()[offset(n, c, h, w)];
  }

  inline Dtype data_at(const vector<int>& index) const {
    return cpu_data()[offset(index)];
  }

  inline Dtype diff_at(const vector<int>& index) const {
    return cpu_diff()[offset(index)];
  }

  inline const shared_ptr<SyncedMemory>& data() const {
    CHECK(data_);
    return data_;
  }

  inline const shared_ptr<SyncedMemory>& diff() const {
    CHECK(diff_);
    return diff_;
  }
  //Read-only access to cpu data
  const Dtype* cpu_data() const;
  //Set cpu data
  void set_cpu_data(Dtype* data);
  //Read-only access to gpu shape and gpu data
  const int* gpu_shape() const;
  const Dtype* gpu_data() const;
  //Set gpu data
  void set_gpu_data(Dtype* data);
  //Read-only access to cpu diff
  const Dtype* cpu_diff() const;
  //Read-only access to gpu diff
  const Dtype* gpu_diff() const;
  //Read-write access to cpu data, gpu data, cpu diff, and gpu diff
  Dtype* mutable_cpu_data();
  Dtype* mutable_gpu_data();
  Dtype* mutable_cpu_diff();
  Dtype* mutable_gpu_diff();
  void Update();//Update operation: applies the stored diff to data (roughly data = data - diff)
  //Deserialization: restore a Blob object from a BlobProto
  void FromProto(const BlobProto& proto, bool reshape = true);
  //Serialization: save the in-memory Blob object into a BlobProto
  void ToProto(BlobProto* proto, bool write_diff = false) const;
  //Compute the L1 norm (sum of absolute values) of data
  Dtype asum_data() const;
  //Compute the L1 norm (sum of absolute values) of diff
  Dtype asum_diff() const;
  //Compute the sum of squares (squared L2 norm) of data
  Dtype sumsq_data() const;
  //Compute the sum of squares (squared L2 norm) of diff
  Dtype sumsq_diff() const;

  void scale_data(Dtype scale_factor);  //scale data by a constant factor
  void scale_diff(Dtype scale_factor);  //scale diff by a constant factor
  void ShareData(const Blob& other);    //share the data of another Blob
  void ShareDiff(const Blob& other);    //share the diff of another Blob
  bool ShapeEquals(const BlobProto& other);  //check whether the shape matches a BlobProto

 protected:
  shared_ptr<SyncedMemory> data_;//pointer to the data storage
  shared_ptr<SyncedMemory> diff_;//pointer to the diff storage
  shared_ptr<SyncedMemory> shape_data_;//shape stored in SyncedMemory, used by gpu_shape()
  vector<int> shape_;//shape information
  int count_;//number of valid elements
  int capacity_;//capacity of the allocated storage
  DISABLE_COPY_AND_ASSIGN(Blob);
};  // class Blob

}  // namespace caffe

#endif  // CAFFE_BLOB_HPP_
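
To close, here is a minimal usage sketch (assuming the Caffe headers and library are available; the shape and values are made up for illustration) showing how a Blob is created, filled through the mutable CPU pointer, and read back with the accessors declared above:

#include <iostream>
#include <vector>
#include "caffe/blob.hpp"

int main() {
  // Illustrative shape: a batch of 2 single-channel 3x4 "images".
  std::vector<int> shape(4);
  shape[0] = 2; shape[1] = 1; shape[2] = 3; shape[3] = 4;
  caffe::Blob<float> blob(shape);

  // Fill data through the mutable CPU pointer.
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = static_cast<float>(i);
  }

  std::cout << "shape: " << blob.shape_string()
            << " element (1,0,2,3) = " << blob.data_at(1, 0, 2, 3) << std::endl;
  return 0;
}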