轉自 http://m.blog.csdn.net/u013010889/article/details/76343758
一直對softmax的反向傳播的caffe代碼看不懂,最近在朱神的數學理論支撐下給我詳解了它的數學公式,才豁然開朗
Softmax公式推導:
caffe源碼
// Backward pass of the softmax layer.
// top_diff   = dL/da, the gradient arriving from the next layer.
// bottom_diff = dL/dz, the gradient this layer propagates backward.
// top_data   = a = softmax(z), this layer's forward output.
// Implements dL/dz = a .* (dL/da - dot(dL/da, a)), the softmax
// Jacobian-vector product, using BLAS primitives over the channel axis.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
Dtype* scale_data = scale_.mutable_cpu_data();
int channels = top[0]->shape(softmax_axis_);
int dim = top[0]->count() / outer_num_;
// Initialize bottom_diff with top_diff (= dL/da); per outer index its
// shape is channels x inner_num_.
caffe_copy(top[0]->count(), top_diff, bottom_diff);
for (int i = 0; i < outer_num_; ++i) {
// compute dot(top_diff, top_data) and subtract them from the bottom diff
// Strided dot over the channel axis: for each spatial position k,
// scale_data[k] = sum_c dL/da_c * a_c  (the scalar dot(dL/da, a)).
for (int k = 0; k < inner_num_; ++k) {
scale_data[k] = caffe_cpu_strided_dot<Dtype>(channels,
bottom_diff + i * dim + k, inner_num_,
top_data + i * dim + k, inner_num_);
}
// subtraction
// sum_multiplier_ is an all-ones vector of length channels (set up in
// Reshape); the gemm broadcasts the scalar dot product across channels:
// bottom_diff = -1 * sum_multiplier_ * scale_data + 1 * bottom_diff,
// i.e. the (dL/da - dot(dL/da, a)) term.
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1,
-1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim);
}
// elementwise multiplication
// Final elementwise product with a finishes dL/dz = a .* (...).
caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
SoftmaxWithLoss公式推導:
caffe源碼
// Backward pass of softmax-with-loss. For cross-entropy on top of softmax
// the gradient w.r.t. the logits is simply a_k (the softmax probability),
// minus 1 at the ground-truth class, scaled by the loss weight.
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
// bottom[1] holds the labels; gradients cannot flow into them.
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
const Dtype* prob_data = prob_.cpu_data();
// Seed every gradient entry with a_k (the cached softmax output).
caffe_copy(prob_.count(), prob_data, bottom_diff);
const Dtype* label = bottom[1]->cpu_data();
int dim = prob_.count() / outer_num_;
// count tracks how many positions actually contribute to the loss,
// used below for VALID-style normalization.
int count = 0;
for (int i = 0; i < outer_num_; ++i) {
for (int j = 0; j < inner_num_; ++j) {
const int label_value = static_cast<int>(label[i * inner_num_ + j]);
// Positions carrying the ignore_label get a zero gradient.
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
bottom_diff[i * dim + c * inner_num_ + j] = 0;
}
} else {
// At the true class k == y, the gradient becomes a_k - 1.
bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
++count;
}
}
}
// Scale gradient
// top[0]->cpu_diff()[0] is the loss weight from the layer above;
// get_normalizer divides per the configured normalization mode.
Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, count);
caffe_scal(prob_.count(), loss_weight, bottom_diff);
}
}
Softmax注意點
Softmax前傳時有求指數的操作,如果z很小或者很大,很容易發生float/double的上溢和下溢。這個問題其實也是有解決辦法的,caffe源碼中求 exponential 之前將z的每一個元素減去z分量中的最大值。這樣求 exponential 的時候會碰到的最大的數就是 0 了,不會發生 overflow 的問題,但是如果其他數原本是正常範圍,現在全部被減去了一個非常大的數,於是都變成了絕對值非常大的負數,所以全部都會發生 underflow,但是 underflow 的時候得到的是 0,這其實是非常 meaningful 的近似值,而且後續的計算也不會出現奇怪的 NaN。
詳情參考這篇博客:《Softmax vs. Softmax-Loss: Numerical Stability》
// Forward pass of the softmax layer: top = softmax(bottom) along
// softmax_axis_. For numerical stability the per-position channel maximum
// is subtracted before exponentiation, so exp never overflows (the largest
// argument is 0; underflow to 0 is a harmless, meaningful approximation).
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
Dtype* scale_data = scale_.mutable_cpu_data();
int channels = bottom[0]->shape(softmax_axis_);
int dim = bottom[0]->count() / outer_num_;
caffe_copy(bottom[0]->count(), bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
for (int i = 0; i < outer_num_; ++i) {
// initialize scale_data to the first plane
// Then take the running maximum over channels at each spatial position.
caffe_copy(inner_num_, bottom_data + i * dim, scale_data);
for (int j = 0; j < channels; j++) {
for (int k = 0; k < inner_num_; k++) {
scale_data[k] = std::max(scale_data[k],
bottom_data[i * dim + j * inner_num_ + k]);
}
}
// subtraction
// Broadcast-subtract the max via a rank-1 gemm with the all-ones vector.
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_,
1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data);
// exponentiation
caffe_exp<Dtype>(dim, top_data, top_data);
// sum after exp
// scale_data[k] now holds sum_c exp(z_c - max) for each position k.
caffe_cpu_gemv<Dtype>(CblasTrans, channels, inner_num_, 1.,
top_data, sum_multiplier_.cpu_data(), 0., scale_data);
// division
// Normalize channel by channel; note top_data advances by inner_num_
// each iteration, so after this loop it points at the next outer slice.
for (int j = 0; j < channels; j++) {
caffe_div(inner_num_, top_data, scale_data, top_data);
top_data += inner_num_;
}
}
}
參考博客
深度學習筆記8:softmax層的實現
Caffe Softmax層的實現原理?