Take ReLULayer as an example. It is fairly simple, so let's go straight to the code: the forward pass evaluates the activation function, and the backward pass computes its derivative.
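Concretely, writing \alpha for negative_slope, the layer computes

f(x) = \max(x, 0) + \alpha \min(x, 0) =
\begin{cases} x, & x > 0 \\ \alpha x, & x \le 0 \end{cases}
\qquad
f'(x) = \begin{cases} 1, & x > 0 \\ \alpha, & x \le 0 \end{cases}

so by the chain rule the backward pass is \partial L / \partial x = (\partial L / \partial y) \cdot f'(x). With the default \alpha = 0 this is the standard ReLU; a nonzero \alpha gives leaky ReLU. Note that the single max/min expression covers both branches without a conditional, which is exactly the form used in the loops below.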
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
template <typename Dtype>
void ReLULayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();  // input activations
  Dtype* top_data = top[0]->mutable_cpu_data();      // output buffer
  const int count = bottom[0]->count();              // number of elements
  // negative_slope (default 0): for a negative input x the output is
  // negative_slope * x; for a positive input x the output is x itself.
  // With the default of 0 this reduces to the standard ReLU.
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  for (int i = 0; i < count; ++i) {
    top_data[i] = std::max(bottom_data[i], Dtype(0))
        + negative_slope * std::min(bottom_data[i], Dtype(0));
  }
}
template <typename Dtype>
void ReLULayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();    // forward input, needed to choose the slope
    const Dtype* top_diff = top[0]->cpu_diff();          // gradient w.r.t. the output
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();  // gradient w.r.t. the input
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    for (int i = 0; i < count; ++i) {
      // If bottom_data[i] > 0, then bottom_diff[i] = top_diff[i];
      // if bottom_data[i] <= 0, then bottom_diff[i] = top_diff[i] * negative_slope.
      bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0)
          + negative_slope * (bottom_data[i] <= 0));
    }
  }
}
#ifdef CPU_ONLY
STUB_GPU(ReLULayer);  // in a CPU-only build, stub out Forward_gpu/Backward_gpu
#endif

INSTANTIATE_CLASS(ReLULayer);  // explicitly instantiate the float and double versions
} // namespace caffe
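For completeness, here is a minimal sketch of driving this layer directly through Caffe's C++ API. This is not from the original post: the blob shape, the input values, and the 0.1 slope are illustrative, and it assumes a standard Caffe build. In a prototxt the same parameter would be set via relu_param { negative_slope: ... }.

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/layers/relu_layer.hpp"

int main() {
  // One bottom blob holding five illustrative values.
  caffe::Blob<float> bottom(1, 1, 1, 5);
  const float values[5] = {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f};
  float* in = bottom.mutable_cpu_data();
  for (int i = 0; i < 5; ++i) in[i] = values[i];

  caffe::Blob<float> top;
  std::vector<caffe::Blob<float>*> bottom_vec(1, &bottom);
  std::vector<caffe::Blob<float>*> top_vec(1, &top);

  // Leaky slope of 0.1; leaving it unset keeps the default of 0 (plain ReLU).
  caffe::LayerParameter param;
  param.mutable_relu_param()->set_negative_slope(0.1f);

  caffe::ReLULayer<float> layer(param);
  layer.SetUp(bottom_vec, top_vec);    // reshapes top to match bottom
  layer.Forward(bottom_vec, top_vec);  // dispatches to Forward_cpu in CPU mode
  // top now holds: -0.2, -0.05, 0, 0.5, 2
  return 0;
}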