This post analyzes the Caffe source files src/caffe/layers/data_layer.cpp and src/caffe/layers/base_data_layer.cpp, which together implement Caffe's data layers.
A data layer sits at the very bottom of the network; its job is to package data into blobs and feed them into the Net. From the code we can see the following inheritance chain: Layer -> BaseDataLayer -> BasePrefetchingDataLayer (which also inherits InternalThread) -> DataLayer.
So before digging into how DataLayer is built, it helps to first understand the Layer class: http://blog.csdn.net/lanxuecc/article/details/53023211
Next come the BaseDataLayer and BasePrefetchingDataLayer classes in base_data_layer.cpp; InternalThread is Caffe's abstract multithreading interface class.
base_data_layer.hpp:
#ifndef CAFFE_DATA_LAYERS_HPP_
#define CAFFE_DATA_LAYERS_HPP_
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/data_transformer.hpp" //data_transformer文件中實現了常用的數據預處理操作,如尺度變換,減均值,鏡像變換等
#include "caffe/internal_thread.hpp" //處理多線程的代碼文件
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/blocking_queue.hpp" //線程隊列的相關文件
namespace caffe {
/**
* @brief Provides base for data layers that feed blobs to the Net.
*
* TODO(dox): thorough documentation for Forward and proto params.
*/
/* A subclass of Layer: the base class for data layers, which feed blobs into the Net */
template <typename Dtype>
class BaseDataLayer : public Layer<Dtype> {
public:
explicit BaseDataLayer(const LayerParameter& param); //構造函數,傳入的參數就是solover.prototxt文件中定義的每層的參數
// LayerSetUp: implements common data layer setup functionality, and calls
// DataLayerSetUp to do special data layer setup for individual layer types.
// This method may not be overridden except by the BasePrefetchingDataLayer.
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
// Data layers should be shared by multiple solvers in parallel
virtual inline bool ShareInParallel() const { return true; }
// type-specific data setup; concrete data layers override this to do the real work
virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {}
// Data layers have no bottoms, so reshaping is trivial.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {}
// Data layers have nothing to backpropagate, so Backward is a no-op on both CPU and GPU
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {}
protected:
// transformation parameters, defined in caffe.proto
TransformationParameter transform_param_;
// smart pointer to a DataTransformer, which handles the preprocessing (see the sketch after this class)
shared_ptr<DataTransformer<Dtype> > data_transformer_;
// whether the layer outputs labels
bool output_labels_;
};
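The transform_param_ / data_transformer_ pair applies whatever preprocessing is configured in the layer's transform_param block. As a hedged illustration (the field values here are placeholders; scale, mirror and crop_size are real TransformationParameter fields from caffe.proto), constructing a transformer by hand looks like this:

#include "caffe/data_transformer.hpp"
#include "caffe/proto/caffe.pb.h"

void make_transformer() {
  caffe::TransformationParameter tp;
  tp.set_scale(0.00390625f);  // 1/255: map 8-bit pixel values into [0, 1]
  tp.set_mirror(true);        // random horizontal flips
  tp.set_crop_size(28);       // random crops in TRAIN, center crops in TEST
  caffe::DataTransformer<float> transformer(tp, caffe::TRAIN);
  transformer.InitRand();     // seed the RNG used for mirroring/cropping
}

This is essentially what BaseDataLayer::LayerSetUp below does with transform_param_ and this->phase_.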
// A Batch bundles two blobs: data and labels
template <typename Dtype>
class Batch {
public:
Blob<Dtype> data_, label_;
};
/* Derives from both BaseDataLayer and InternalThread */
template <typename Dtype>
class BasePrefetchingDataLayer :
public BaseDataLayer<Dtype>, public InternalThread {
public:
// constructor
explicit BasePrefetchingDataLayer(const LayerParameter& param);
// LayerSetUp: implements common data layer setup functionality, and calls
// DataLayerSetUp to do special data layer setup for individual layer types.
// This method may not be overridden.
void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
// Forward is implemented here for both CPU and GPU: pop a prefetched batch and copy it into top
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
// Prefetches batches (asynchronously if to GPU memory)
static const int PREFETCH_COUNT = 3;
protected:
// entry point of the prefetch thread
virtual void InternalThreadEntry();
// loads one batch; pure virtual, implemented by concrete data layers
virtual void load_batch(Batch<Dtype>* batch) = 0;
/* array of prefetch buffers */
Batch<Dtype> prefetch_[PREFETCH_COUNT];
/* two blocking queues implementing the producer/consumer hand-off (a standalone sketch follows this header) */
BlockingQueue<Batch<Dtype>*> prefetch_free_; /* the prefetch thread pops an empty Batch from here, fills it, and pushes it to prefetch_full_ */
BlockingQueue<Batch<Dtype>*> prefetch_full_; /* Forward pops a filled Batch from here, consumes it, and returns it to prefetch_free_ */
/* scratch blob that aliases batch memory during image transformation */
Blob<Dtype> transformed_data_;
};
} // namespace caffe
#endif // CAFFE_DATA_LAYERS_HPP_
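The free/full queue pair is a classic double-buffered producer/consumer hand-off. Below is a minimal, self-contained sketch of that pattern; the simplified ToyBlockingQueue and ToyBatch are stand-ins for illustration, not Caffe's actual classes (Caffe's BlockingQueue lives in util/blocking_queue.hpp and additionally supports peek() and a logging pop(message)):

#include <condition_variable>
#include <mutex>
#include <queue>

// Simplified stand-in for caffe::BlockingQueue: pop() blocks until an item arrives.
template <typename T>
class ToyBlockingQueue {
 public:
  void push(const T& t) {
    { std::lock_guard<std::mutex> lock(mutex_); queue_.push(t); }
    cond_.notify_one();
  }
  T pop() {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [this] { return !queue_.empty(); });
    T t = queue_.front();
    queue_.pop();
    return t;
  }
 private:
  std::queue<T> queue_;
  std::mutex mutex_;
  std::condition_variable cond_;
};

struct ToyBatch {};  // stands in for Batch<Dtype> (a data blob plus a label blob)

// Producer, i.e. the prefetch thread: take an empty batch, fill it, hand it over.
void prefetch_loop(ToyBlockingQueue<ToyBatch*>& free_q,
                   ToyBlockingQueue<ToyBatch*>& full_q) {
  while (true) {
    ToyBatch* b = free_q.pop();  // blocks when the consumer has not caught up
    // ... load_batch(b): read and transform one batch ...
    full_q.push(b);
  }
}

// Consumer, i.e. Forward_cpu: take a filled batch, use it, recycle it.
void forward_once(ToyBlockingQueue<ToyBatch*>& free_q,
                  ToyBlockingQueue<ToyBatch*>& full_q) {
  ToyBatch* b = full_q.pop();  // blocks when the producer has not caught up
  // ... copy b's data and labels into the top blobs ...
  free_q.push(b);
}

Because both pops block, the net automatically waits when data loading is the bottleneck, and the prefetch thread automatically pauses once all PREFETCH_COUNT buffers are filled.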
base_data_layer.cpp:
#include <boost/thread.hpp>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/blocking_queue.hpp"
namespace caffe {
// The constructor first initializes the parent Layer with param,
// then initializes transform_param_ from param.transform_param()
template <typename Dtype>
BaseDataLayer<Dtype>::BaseDataLayer(const LayerParameter& param)
: Layer<Dtype>(param),
transform_param_(param.transform_param()) {
}
template <typename Dtype>
void BaseDataLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (top.size() == 1) { // a single top blob means data only, no labels
output_labels_ = false;
} else {
output_labels_ = true;
}
/* create the DataTransformer used to preprocess the data */
data_transformer_.reset(
new DataTransformer<Dtype>(transform_param_, this->phase_));
data_transformer_->InitRand(); //生成隨機數據種子
// The subclasses should setup the size of bottom and top
DataLayerSetUp(bottom, top); // type-specific data setup
}
// BasePrefetchingDataLayer constructor: every prefetch buffer starts out on the free queue
template <typename Dtype>
BasePrefetchingDataLayer<Dtype>::BasePrefetchingDataLayer(
const LayerParameter& param)
: BaseDataLayer<Dtype>(param),
prefetch_free_(), prefetch_full_() {
for (int i = 0; i < PREFETCH_COUNT; ++i) {
prefetch_free_.push(&prefetch_[i]);
}
}
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::LayerSetUp(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
BaseDataLayer<Dtype>::LayerSetUp(bottom, top);// 先調用父類BaseDataLayer的LayerSetUp
// Before starting the prefetch thread, we make cpu_data and gpu_data
// calls so that the prefetch thread does not accidentally make simultaneous
// cudaMalloc calls when the main thread is running. In some GPUs this
// seems to cause failures if we do not so.
for (int i = 0; i < PREFETCH_COUNT; ++i) {
prefetch_[i].data_.mutable_cpu_data(); /* allocate cpu memory for each batch's data blob */
if (this->output_labels_) {
prefetch_[i].label_.mutable_cpu_data(); /* allocate cpu memory for each batch's label blob */
}
}
#ifndef CPU_ONLY
if (Caffe::mode() == Caffe::GPU) {
for (int i = 0; i < PREFETCH_COUNT; ++i) {
prefetch_[i].data_.mutable_gpu_data(); /* allocate gpu memory for each batch's data blob */
if (this->output_labels_) {
prefetch_[i].label_.mutable_gpu_data(); /* allocate gpu memory for each batch's label blob */
}
}
}
#endif
DLOG(INFO) << "Initializing prefetch";
this->data_transformer_->InitRand();//生成隨機數據種子
StartInternalThread(); // start the internal prefetch thread
DLOG(INFO) << "Prefetch initialized.";
}
// The prefetch thread body: keep pulling empty batches and filling them until asked to stop
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::InternalThreadEntry() {
#ifndef CPU_ONLY
cudaStream_t stream;
if (Caffe::mode() == Caffe::GPU) {
CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
}
#endif
try {
while (!must_stop()) {
Batch<Dtype>* batch = prefetch_free_.pop();//從free_隊列去數據結構
load_batch(batch); // fill it with data; implemented by the derived class
#ifndef CPU_ONLY
if (Caffe::mode() == Caffe::GPU) {
batch->data_.data().get()->async_gpu_push(stream);//異步,把數據同步到GPU,使用Syncedmem->async_gpu_push
CUDA_CHECK(cudaStreamSynchronize(stream));
}
#endif
prefetch_full_.push(batch); // hand the filled batch to the full queue
}
} catch (boost::thread_interrupted&) {
// Interrupted exception is expected on shutdown
}
#ifndef CPU_ONLY
if (Caffe::mode() == Caffe::GPU) {
CUDA_CHECK(cudaStreamDestroy(stream));
}
#endif
}
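The stream handling above follows a standard CUDA pattern: create a non-blocking stream so the prefetch thread's copies do not serialize against the default stream, push the batch asynchronously, then synchronize so the batch is complete before it is queued. Here is a minimal sketch of the same pattern with plain CUDA runtime calls (the buffer names and size are placeholders; in Caffe the copy itself happens inside SyncedMemory::async_gpu_push):

#include <cuda_runtime.h>

// Copy one host buffer to the GPU on a private non-blocking stream.
void push_to_gpu(const float* host_data, float* gpu_data, size_t count) {
  cudaStream_t stream;
  cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
  cudaMemcpyAsync(gpu_data, host_data, count * sizeof(float),
                  cudaMemcpyHostToDevice, stream);
  cudaStreamSynchronize(stream);  // make sure the copy finished before handing off
  cudaStreamDestroy(stream);
}

Note that an asynchronous host-to-device copy only overlaps with other work when the host memory is pinned; Caffe allocates host buffers with cudaMallocHost when running in GPU mode, so this holds.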
// Forward hands a preprocessed batch to top.
// A data layer's forward does no computation and ignores bottom; it only fills top with data.
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");//從full隊列取數據
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);//調整top數據形狀大小,一次讀取一個batch大小的數據
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.cpu_data(),
top[0]->mutable_cpu_data());// Copy the data。把數據拷貝到top中
DLOG(INFO) << "Prefetch copied";
if (this->output_labels_) {//如果有標籤,也要把標籤拷貝到top中
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);//調整top標籤形狀大小
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.cpu_data(),
top[1]->mutable_cpu_data()); //拷貝標籤到top中
}
prefetch_free_.push(batch); // recycle the consumed batch back onto the free queue
}
#ifdef CPU_ONLY
STUB_GPU_FORWARD(BasePrefetchingDataLayer, Forward);
#endif
INSTANTIATE_CLASS(BaseDataLayer);
INSTANTIATE_CLASS(BasePrefetchingDataLayer);
} // namespace caffe
Now let's look at the DataLayer class defined in data_layer.cpp.
data_layer.hpp:
#ifndef CAFFE_DATA_LAYER_HPP_
#define CAFFE_DATA_LAYER_HPP_
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/data_reader.hpp"
#include "caffe/data_transformer.hpp"
#include "caffe/internal_thread.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"
namespace caffe {
/* DataLayer derives from BasePrefetchingDataLayer */
template <typename Dtype>
class DataLayer : public BasePrefetchingDataLayer<Dtype> {
public:
// constructor
explicit DataLayer(const LayerParameter& param); /*傳入protobuf的網絡的層的參數*/
// destructor
virtual ~DataLayer();
// layer-specific setup
virtual void DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
// DataLayer uses DataReader instead for sharing for parallelism
// so the layer itself is not shared across solvers
virtual inline bool ShareInParallel() const { return false; }
// returns the layer type string
virtual inline const char* type() const { return "Data"; }
// number of bottom blobs: data layers sit at the bottom of the net, so 0
virtual inline int ExactNumBottomBlobs() const { return 0; }
// minimum number of top blobs (data only)
virtual inline int MinTopBlobs() const { return 1; }
// maximum number of top blobs (data plus labels)
virtual inline int MaxTopBlobs() const { return 2; }
protected:
virtual void load_batch(Batch<Dtype>* batch); //加載數據
DataReader reader_; /* queues read requests to a thread dedicated to one database (e.g. examples/mnist/mnist_train_lmdb), creating that thread if it does not yet exist */
};
} // namespace caffe
#endif // CAFFE_DATA_LAYER_HPP_
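To see how the pieces fit together end to end, here is a hedged sketch of constructing and running a standalone DataLayer through Caffe's layer registry; the LMDB path and batch size are placeholders, and logging setup and error handling are omitted:

#include <vector>
#include <boost/shared_ptr.hpp>
#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"

void run_data_layer_once() {
  caffe::LayerParameter param;
  param.set_name("data");
  param.set_type("Data");  // registered by REGISTER_LAYER_CLASS(Data) at the bottom of data_layer.cpp
  param.set_phase(caffe::TRAIN);
  caffe::DataParameter* data_param = param.mutable_data_param();
  data_param->set_source("examples/mnist/mnist_train_lmdb");  // placeholder path
  data_param->set_batch_size(64);
  data_param->set_backend(caffe::DataParameter_DB_LMDB);
  // The registry returns a shared_ptr<Layer<float> > that points at a DataLayer.
  boost::shared_ptr<caffe::Layer<float> > layer =
      caffe::LayerRegistry<float>::CreateLayer(param);
  std::vector<caffe::Blob<float>*> bottom;  // data layers take no bottom blobs
  caffe::Blob<float> data, label;
  std::vector<caffe::Blob<float>*> top;
  top.push_back(&data);
  top.push_back(&label);  // two tops => output_labels_ == true
  layer->SetUp(bottom, top);    // LayerSetUp + DataLayerSetUp, starts the prefetch thread
  layer->Forward(bottom, top);  // pops one prefetched batch into data and label
  // data is now 64 x C x H x W; label is a 1-D blob with 64 entries.
}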
data_layer.cpp:
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#endif // USE_OPENCV
#include <stdint.h>
#include <vector>
#include "caffe/data_transformer.hpp"
#include "caffe/layers/data_layer.hpp"
#include "caffe/util/benchmark.hpp"
namespace caffe {
template <typename Dtype>
DataLayer<Dtype>::DataLayer(const LayerParameter& param)
: BasePrefetchingDataLayer<Dtype>(param), /*調用基類構造函數BasePrefetchingDataLayer()之後,對 DataReader reader_ 進行賦值*/
reader_(param) {
}
template <typename Dtype>
DataLayer<Dtype>::~DataLayer() {
this->StopInternalThread(); //終止線程
}
/* DataLayer performs its type-specific setup here */
template <typename Dtype>
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) { /*這裏batch_size就是solver.prototxt中傳入的*/
const int batch_size = this->layer_param_.data_param().batch_size(); /*layer_param_在父類layer中定義*/
// Read a data point, and use it to initialize the top blob.
// Datum is defined in caffe.proto; once DataReader has been initialized with the LayerParameter
// (which contains a DataParameter), it knows which database to read and yields Datums, which are used below to reshape the blobs.
Datum& datum = *(reader_.full().peek());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
// reshape the scratch blob to the single-item shape
this->transformed_data_.Reshape(top_shape);
// Reshape top[0] and prefetch_data according to the batch_size.
top_shape[0] = batch_size;
top[0]->Reshape(top_shape); /*reshape top[0]中數據*/
// reshape each prefetch buffer's data blob, allocating its memory
for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
this->prefetch_[i].data_.Reshape(top_shape);
}
LOG(INFO) << "output data size: " << top[0]->num() << ","
<< top[0]->channels() << "," << top[0]->height() << ","
<< top[0]->width();
// label
// if labels are present, reshape each prefetch buffer's label blob too, allocating its memory
if (this->output_labels_) {
vector<int> label_shape(1, batch_size);
top[1]->Reshape(label_shape);
for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
this->prefetch_[i].label_.Reshape(label_shape);
}
}
}
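As a concrete illustration of the reshape arithmetic above, assume MNIST-style datums (1 channel, 28x28 pixels), batch_size 64, and no crop configured, in which case InferBlobShape returns {1, channels, height, width}. A hedged, self-contained sketch of what the shapes become:

#include <cassert>
#include <vector>

int main() {
  const int batch_size = 64;
  std::vector<int> top_shape;  // InferBlobShape(datum) would yield {1, 1, 28, 28}
  top_shape.push_back(1);
  top_shape.push_back(1);
  top_shape.push_back(28);
  top_shape.push_back(28);
  top_shape[0] = batch_size;   // overwrite the leading 1: {64, 1, 28, 28}
  assert(top_shape[0] == 64 && top_shape.size() == 4);
  // vector<int> label_shape(1, batch_size) uses the (count, value) constructor:
  // one element whose value is batch_size, i.e. a 1-D label blob of length 64.
  std::vector<int> label_shape(1, batch_size);
  assert(label_shape.size() == 1 && label_shape[0] == 64);
  return 0;
}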
// This function is called on prefetch thread
template<typename Dtype>
void DataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
CPUTimer batch_timer;
batch_timer.Start();
double read_time = 0;
double trans_time = 0;
CPUTimer timer;
CHECK(batch->data_.count());
CHECK(this->transformed_data_.count());
// peek at one datum to initialize the top blob dimensions (it stays in the queue)
// Reshape according to the first datum of each batch
// on single input batches allows for inputs of varying dimension.
const int batch_size = this->layer_param_.data_param().batch_size();
Datum& datum = *(reader_.full().peek());
// Use data_transformer to infer the expected blob shape from datum.
vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
this->transformed_data_.Reshape(top_shape);
// Reshape batch according to the batch_size.
top_shape[0] = batch_size;
batch->data_.Reshape(top_shape); /*同時分配內存*/
Dtype* top_data = batch->data_.mutable_cpu_data();
Dtype* top_label = NULL; // suppress warnings about uninitialized variables
if (this->output_labels_) {
top_label = batch->label_.mutable_cpu_data();
}
// main loop: load batch_size items
for (int item_id = 0; item_id < batch_size; ++item_id) {
timer.Start();
// get a datum
Datum& datum = *(reader_.full().pop("Waiting for data")); // pop a datum from the reader's full queue (blocks if none is ready)
// accumulate the read time
read_time += timer.MicroSeconds();
timer.Start();
// compute this item's offset into the batch blob
// Apply data transformations (mirror, scale, crop...)
int offset = batch->data_.offset(item_id);
this->transformed_data_.set_cpu_data(top_data + offset);
// apply the transformations and write the result into the batch (transformed_data_ aliases the batch memory)
this->data_transformer_->Transform(datum, &(this->transformed_data_));
// Copy label.
if (this->output_labels_) {
top_label[item_id] = datum.label();
}
// accumulate the transform time
trans_time += timer.MicroSeconds();
reader_.free().push(const_cast<Datum*>(&datum));
}
timer.Stop();
// total time to load the batch
batch_timer.Stop();
// log the timing breakdown
DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
DLOG(INFO) << " Read time: " << read_time / 1000 << " ms.";
DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}
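One detail worth spelling out is the offset arithmetic in the loop: for a batch blob of shape N x C x H x W, Blob::offset(item_id) is item_id * C * H * W, so the set_cpu_data(top_data + offset) call makes transformed_data_ alias item item_id's slice of the batch, letting DataTransformer::Transform write straight into the batch's memory with no extra copy. A tiny stand-in (item_offset is a hypothetical helper mirroring Blob::offset with c = h = w = 0):

// Index of the first element of item n in a row-major N x C x H x W blob;
// the full formula is Blob::offset(n, c, h, w) = ((n * C + c) * H + h) * W + w.
inline int item_offset(int n, int C, int H, int W) {
  return n * C * H * W;
}
// e.g. with C = 1 and H = W = 28, item 3 starts at element 3 * 784 = 2352.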
INSTANTIATE_CLASS(DataLayer);
REGISTER_LAYER_CLASS(Data);
} // namespace caffe
Thanks to: http://blog.csdn.net/iamzhangzhuping/article/details/50582503