Caffe source code reading (9): euclidean_loss_layer.cpp

Formula

Parameters: bottom[0], bottom[1], top[0]
1. (N*C*H*W) the predictions $\hat{y}_n$ → bottom[0]
2. (N*C*H*W) the targets $y_n$ → bottom[1]
3. (1*1*1*1) the computed Euclidean loss $L$ → top[0]

$$L=\frac{1}{2N}\sum_{n=1}^{N}\left\|\hat{y}_n-y_n\right\|_2^2$$
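Differentiating this loss with respect to each input gives the gradients that the Backward pass below implements:

$$\frac{\partial L}{\partial \hat{y}_n}=\frac{1}{N}\left(\hat{y}_n-y_n\right),\qquad \frac{\partial L}{\partial y_n}=-\frac{1}{N}\left(\hat{y}_n-y_n\right)$$

This is where the sign variable in Backward_cpu comes from: +1 for bottom[0] (the predictions) and -1 for bottom[1] (the targets).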

Code

(1) Reshape

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  LossLayer<Dtype>::Reshape(bottom, top);
  // Ensure the two inputs bottom[0] and bottom[1] have the same per-sample
  // dimensions (C*H*W); the batch size N was already checked in LossLayer<Dtype>::Reshape
  CHECK_EQ(bottom[0]->count(1), bottom[1]->count(1))
      << "Inputs must have the same dimension.";
  diff_.ReshapeLike(*bottom[0]);  // diff_ holds bottom[0] - bottom[1]; same shape as the
                                  // bottoms, filled in Forward and reused in Backward
}

(2) Forward

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  int count = bottom[0]->count();
  // caffe_sub performs element-wise subtraction: diff_.mutable_cpu_data() = bottom[0] - bottom[1]
  caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());
  // caffe_cpu_dot computes the dot product dot = diff_ . diff_, i.e. the sum of squared differences
  Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  Dtype loss = dot / bottom[0]->num() / Dtype(2);  // L = dot / (2N)
  top[0]->mutable_cpu_data()[0] = loss;  // write the loss to top[0]
}
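To make the arithmetic concrete, here is a minimal standalone sketch of the same forward computation on raw arrays (hypothetical names, plain C++ without Caffe's BLAS wrappers):

#include <cstddef>
#include <vector>

// Standalone sketch: L = (1/2N) * sum((pred - target)^2).
// diff is filled as a side effect, mirroring the cached diff_ blob.
float euclidean_loss_forward(const std::vector<float>& pred,
                             const std::vector<float>& target,
                             int num, std::vector<float>& diff) {
  diff.resize(pred.size());
  float dot = 0.f;
  for (std::size_t i = 0; i < pred.size(); ++i) {
    diff[i] = pred[i] - target[i];  // what caffe_sub does
    dot += diff[i] * diff[i];       // what caffe_cpu_dot does
  }
  return dot / num / 2.f;           // divide by 2N
}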

(3) Backward

template <typename Dtype>
void EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      // +1 for bottom[0] (the predictions), -1 for bottom[1] (the targets)
      const Dtype sign = (i == 0) ? 1 : -1;
      // scale by the loss weight stored in top[0]'s diff and by the batch size N
      const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
      // caffe_cpu_axpby computes y = alpha*x + beta*y; with beta = 0 this is
      // bottom[i]->mutable_cpu_diff() = alpha * diff_.cpu_data()
      caffe_cpu_axpby(
          bottom[i]->count(),              // count
          alpha,                              // alpha
          diff_.cpu_data(),                   // a
          Dtype(0),                           // beta
          bottom[i]->mutable_cpu_diff());  // b
    }
  }
}
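And the matching standalone sketch of the backward rule (again hypothetical names, plain C++), where top_diff plays the role of top[0]->cpu_diff()[0]:

#include <cstddef>
#include <vector>

// Standalone sketch: grad_pred = +top_diff/N * diff, grad_target = -top_diff/N * diff.
void euclidean_loss_backward(const std::vector<float>& diff,
                             float top_diff, int num,
                             std::vector<float>& grad_pred,
                             std::vector<float>& grad_target) {
  const float alpha = top_diff / num;
  grad_pred.resize(diff.size());
  grad_target.resize(diff.size());
  for (std::size_t i = 0; i < diff.size(); ++i) {
    grad_pred[i]   =  alpha * diff[i];  // sign = +1 for bottom[0]
    grad_target[i] = -alpha * diff[i];  // sign = -1 for bottom[1]
  }
}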