Annotated walkthrough of Caffe's batch_norm layer code
template <typename Dtype>
void BatchNormLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (bottom[0]->num_axes() >= 1)
CHECK_EQ(bottom[0]->shape(1), channels_);
top[0]->ReshapeLike(*bottom[0]);
vector<int> sz;
sz.push_back(channels_);
mean_.Reshape(sz); // length channels_; stores the per-channel mean
variance_.Reshape(sz); // length channels_; stores the per-channel variance
temp_.ReshapeLike(*bottom[0]); // holds the element-wise squared deviations (x - mean_)^2, and later the broadcast standard deviation
x_norm_.ReshapeLike(*bottom[0]);
sz[0]=bottom[0]->shape(0);
batch_sum_multiplier_.Reshape(sz); // length batch_size (num)
int spatial_dim = bottom[0]->count()/(channels_*bottom[0]->shape(0)); // spatial_dim = image height*width
/*
* spatial_sum_multiplier_ is a buffer the size of one image plane (height*width),
* initialized to all 1s; when computing mean_ it lets the values of one image
* plane be summed into a single number via a matrix-vector multiplication.
*/
if (spatial_sum_multiplier_.num_axes() == 0 ||
spatial_sum_multiplier_.shape(0) != spatial_dim) {
sz[0] = spatial_dim;
spatial_sum_multiplier_.Reshape(sz);
Dtype* multiplier_data = spatial_sum_multiplier_.mutable_cpu_data(); // buffer the size of one image plane
caffe_set(spatial_sum_multiplier_.count(), Dtype(1), multiplier_data); // initialize to 1
}
int numbychans = channels_*bottom[0]->shape(0); // batch_size * channels
if (num_by_chans_.num_axes() == 0 ||
num_by_chans_.shape(0) != numbychans) {
sz[0] = numbychans;
num_by_chans_.Reshape(sz);
// batch_sum_multiplier_ is a buffer of batch_size entries; it likewise helps
// sum the corresponding channel values across all images when computing mean_.
caffe_set(batch_sum_multiplier_.count(), Dtype(1),
batch_sum_multiplier_.mutable_cpu_data()); // allocate and initialize to 1
}
}
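To make the role of the two all-ones multiplier blobs concrete, here is a minimal illustrative sketch (the helper name row_sums and the plain-loop form are mine, not Caffe's) of what a gemv against an all-ones vector computes, which is exactly how spatial_sum_multiplier_ (and, with a transpose, batch_sum_multiplier_) are used below:

#include <vector>

// Multiplying an (rows x cols) matrix A by an all-ones vector yields the row
// sums of A, scaled by alpha. This is the reduction performed by
// caffe_cpu_gemv with spatial_sum_multiplier_ in Forward_cpu.
std::vector<float> row_sums(const std::vector<float>& A,
                            int rows, int cols, float alpha) {
  std::vector<float> y(rows, 0.f);
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c)
      y[r] += A[r * cols + c];  // sum_c A[r][c] * 1
    y[r] *= alpha;              // alpha = 1/(num*spatial_dim) when computing the mean
  }
  return y;
}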
In Forward_cpu(), both the mean and the variance are computed by matrix-vector products. Following the formulas above, the code is as follows:
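For reference, the formulas in question are the standard batch-normalization statistics, computed per channel c over a batch of N images with spatial size H*W:

$$\mu_c = \frac{1}{N\,H\,W} \sum_{n,h,w} x_{n,c,h,w}, \qquad \sigma_c^2 = \frac{1}{N\,H\,W} \sum_{n,h,w} \left(x_{n,c,h,w} - \mu_c\right)^2, \qquad \hat{x}_{n,c,h,w} = \frac{x_{n,c,h,w} - \mu_c}{\sqrt{\sigma_c^2 + \epsilon}}$$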
template <typename Dtype>
void BatchNormLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
int num = bottom[0]->shape(0);
int spatial_dim = bottom[0]->count()/(bottom[0]->shape(0)*channels_); // spatial_dim = image height*width
// if the bottom blob and the top blob are not the same blob (i.e. not computed in place)
if (bottom[0] != top[0]) {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
if (use_global_stats_) {
// use the stored mean/variance estimates.
const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
caffe_cpu_scale(variance_.count(), scale_factor,
this->blobs_[0]->cpu_data(), mean_.mutable_cpu_data());
caffe_cpu_scale(variance_.count(), scale_factor,
this->blobs_[1]->cpu_data(), variance_.mutable_cpu_data());
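// Note: blobs_[0] and blobs_[1] hold the running (unnormalized) mean and
// variance accumulated by the moving-average updates below, and blobs_[2]
// holds the accumulated scale factor of those updates, so dividing by
// blobs_[2] recovers the actual mean/variance estimates.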
} else {
// compute mean
// sum each image plane to a single value (channels_ * num values in all), scale by 1/(num*spatial_dim), and store the result in the blob num_by_chans_
caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, // channels_*num rows; spatial_dim columns (height*width)
1. / (num * spatial_dim), bottom_data,
spatial_sum_multiplier_.cpu_data(), 0.,
num_by_chans_.mutable_cpu_data());
// the num*channels_ values computed above form a num x channels_ matrix; summing each channel across the batch yields channels_ values, stored in mean_
caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
mean_.mutable_cpu_data());
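// Shape bookkeeping for the two calls above: bottom_data is viewed as a
// (num*channels_) x spatial_dim matrix and multiplied by the all-ones vector
// spatial_sum_multiplier_, collapsing each image plane to one number;
// num_by_chans_ is then viewed as a num x channels_ matrix, transposed, and
// multiplied by the all-ones vector batch_sum_multiplier_, collapsing the
// batch dimension down to channels_ per-channel means.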
}
// subtract mean
// expand the channels_ mean values in mean_ to the full num*channels_*height*width shape, then subtract the mean from top_data
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
num_by_chans_.mutable_cpu_data());
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, -1, num_by_chans_.cpu_data(),
spatial_sum_multiplier_.cpu_data(), 1., top_data); // subtract the mean_ values from the data in blob top_data
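// Broadcast trick used above: the first gemm is the outer product
// batch_sum_multiplier_ (num x 1) * mean_ (1 x channels_), tiling the means
// to num*channels_ values; the second gemm, with alpha = -1 and beta = 1,
// forms the outer product with spatial_sum_multiplier_ (tiling to the full
// blob size) and accumulates it into top_data, i.e. top_data -= tiled mean.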
if (!use_global_stats_) {
// compute variance using var(X) = E((X-EX)^2)
caffe_powx(top[0]->count(), top_data, Dtype(2),
temp_.mutable_cpu_data()); // (X-EX)^2: square every element; result stored in the blob temp_
caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), temp_.cpu_data(),
spatial_sum_multiplier_.cpu_data(), 0.,
num_by_chans_.mutable_cpu_data()); // same matrix-vector product as in the mean_ computation
caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
variance_.mutable_cpu_data()); // E((X-EX)^2): same matrix-vector product as for mean_ (num_by_chans_ is transposed here)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
// blob_[0] = mean_ + moving_average_fraction_* blob_[0];
caffe_cpu_axpby(mean_.count(), Dtype(1), mean_.cpu_data(),
moving_average_fraction_, this->blobs_[0]->mutable_cpu_data());//Y=alpha*X+beta*Y;
int m = bottom[0]->count()/channels_;// m = num*height*width;
//blob_[1] = bias_correction_factor * variance_ + moving_average_fraction_ * blob_[1]
Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
caffe_cpu_axpby(variance_.count(), bias_correction_factor,
variance_.cpu_data(), moving_average_fraction_,
this->blobs_[1]->mutable_cpu_data());
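// The three updates above implement an exponential moving average:
// blobs_[2] accumulates the normalization factor (s = lambda*s + 1),
// blobs_[0] accumulates the batch means, and blobs_[1] accumulates the batch
// variances scaled by m/(m-1), the standard correction that turns the biased
// (divide-by-m) batch variance into an unbiased estimate.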
}
// normalize variance
caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data()); // add a small value eps_ to every variance entry to guard against division by zero
caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5),
variance_.mutable_cpu_data()); // take the square root of every variance entry
// replicate variance to input size
// as with mean_ above, these two calls expand the channels_ standard-deviation values now held in variance_ to the full num*channels_*height*width shape
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.cpu_data(), variance_.cpu_data(), 0.,
num_by_chans_.mutable_cpu_data());
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.cpu_data(),
spatial_sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data());
caffe_div(temp_.count(), top_data, temp_.cpu_data(), top_data); // element-wise divide top_data by temp_
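// At this point top_data holds the normalized activations
// (x - mean_) / sqrt(variance_ + eps_).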
// TODO(cdoersch): The caching is only needed because later in-place layers
// might clobber the data. Can we skip this if they won't?
caffe_copy(x_norm_.count(), top_data,
x_norm_.mutable_cpu_data()); // cache the final normalized output top_data into x_norm_ (used later by Backward)
}
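As a sanity check, here is a minimal self-contained reference implementation (a sketch, not Caffe API; the name batch_norm_reference is hypothetical, training mode only, no moving averages or scale/shift) that should agree with the gemv/gemm pipeline above up to floating-point error:

#include <cmath>
#include <vector>

// Naive reference for BatchNormLayer::Forward_cpu with
// use_global_stats_ == false. Layout: x[num][channels][spatial_dim].
void batch_norm_reference(const std::vector<float>& x, std::vector<float>& y,
                          int num, int channels, int spatial_dim, float eps) {
  const int m = num * spatial_dim;  // samples per channel
  y.resize(x.size());
  for (int c = 0; c < channels; ++c) {
    // per-channel mean and biased variance, as in the gemv reductions above
    float mean = 0.f, var = 0.f;
    for (int n = 0; n < num; ++n)
      for (int s = 0; s < spatial_dim; ++s)
        mean += x[(n * channels + c) * spatial_dim + s];
    mean /= m;
    for (int n = 0; n < num; ++n)
      for (int s = 0; s < spatial_dim; ++s) {
        const float d = x[(n * channels + c) * spatial_dim + s] - mean;
        var += d * d;
      }
    var /= m;
    // normalize: y = (x - mean) / sqrt(var + eps)
    const float inv_std = 1.f / std::sqrt(var + eps);
    for (int n = 0; n < num; ++n)
      for (int s = 0; s < spatial_dim; ++s) {
        const int idx = (n * channels + c) * spatial_dim + s;
        y[idx] = (x[idx] - mean) * inv_std;
      }
  }
}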
(End)