Implementing U-Net in Caffe on Windows

 

Training U-Net in Keras gives good results, but training U-Net in Caffe never came out well. After much thought, only two things differ between the two setups:

1. The loss function

2. The implementation of upsampling

1. On the loss function:

Caffe uses SigmoidCrossEntropyLoss; Keras uses binary_crossentropy.

These are actually the same thing. Caffe simply folds the final sigmoid into the cross-entropy loss layer, while Keras passes the output through a sigmoid layer first and then feeds it into the binary_crossentropy loss. In essence they are identical.
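For reference, here is a minimal sketch of the Caffe side (the layer and blob names are made up). Because the sigmoid is folded into the loss, the loss layer consumes the raw scores directly, and no Sigmoid layer appears before it in the training prototxt:

layer {
  name: "loss"
  type: "SigmoidCrossEntropyLoss"
  bottom: "score"   # raw, pre-sigmoid scores
  bottom: "label"
  top: "loss"
}

At deploy time an explicit Sigmoid layer is still needed to turn the scores into probabilities. Either way, both frameworks compute -[t*log(s(x)) + (1-t)*log(1-s(x))] per pixel, with s the sigmoid; Caffe just evaluates the combined expression in a numerically stabler form.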

2. The upsampling layer

Caffe uses a Deconvolution layer; Keras uses an UpSampling2D layer followed by a Convolution layer.

The Deconvolution layer enlarges the feature map by transposed convolution, and it has learnable parameters.

The UpSampling2D layer, by contrast, has no parameters: it simply enlarges the feature map by interpolation (nearest-neighbour replication in Keras's default, so a 2x upsample turns each pixel into a 2x2 block), and a 2x2 Convolution layer then runs over the enlarged map.

The two differ substantially, and this turned out to be the root cause of U-Net failing to train under Caffe.
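To make the contrast concrete: a 2x Deconvolution in Caffe carries learnable 2x2 kernels (a prototxt sketch; the names and num_output are assumed):

layer {
  name: "upconv"
  type: "Deconvolution"
  bottom: "feat"
  top: "feat_up"
  convolution_param {
    num_output: 256   # assumed channel count
    kernel_size: 2
    stride: 2
  }
}

Keras's UpSampling2D, on the other hand, has no weights at all; the only learnable part of its up-block is the Conv2D that follows it.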

1) For the Deconvolution layer, the parameters are hard to initialize well, so there are two ways to arrive at good weights. The first is fine-tuning; unfortunately there is no well-trained Caffe version of U-Net, so I used HED in place of U-Net for segmentation, since HED does come with a pretrained model.

The second is to write a script that assigns the Deconvolution layer's weights directly and then keeps them fixed during training. For the specifics, see FCN's solver.py and surgery.py.
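surgery.py fills the weights with a bilinear-interpolation kernel from Python. A similar effect can be sketched directly in the prototxt using Caffe's built-in bilinear filler plus a zero learning rate, so the weights are set once and never updated (the layer/blob names and the channel count 256 here are assumptions):

layer {
  name: "upsample"
  type: "Deconvolution"
  bottom: "feat"
  top: "feat_up"
  param { lr_mult: 0 decay_mult: 0 }   # freeze: never update these weights
  convolution_param {
    num_output: 256   # must equal the bottom blob's channel count
    group: 256        # one bilinear filter per channel
    kernel_size: 4    # 2*f - f%2 for scale factor f = 2
    stride: 2
    pad: 1            # ceil((f-1)/2)
    weight_filler { type: "bilinear" }
    bias_term: false
  }
}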

I tried both approaches; the results were somewhat better than training from scratch, but still not as good as the Keras model.

2) Add an upsampling layer to Caffe, then rewrite the network prototxt so that the structure matches Keras exactly (a usage sketch of the finished layer appears at the end of this post).

Since Caffe has no upsampling layer, this takes some effort. I referred to the Caffe port of YOLO, which contains an upsample layer: copy that layer into your own Caffe and recompile.

Four files need to be changed. First, \include\caffe\layers\upsample_layer.hpp:


#ifndef CAFFE_UPSAMPLE_LAYER_HPP_
#define CAFFE_UPSAMPLE_LAYER_HPP_
 
#include <vector>
 
#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"
 
namespace caffe {
 
template <typename Dtype>
class UpsampleLayer : public Layer<Dtype> {
 public:
  explicit UpsampleLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
 
  virtual inline const char* type() const { return "Upsample"; }
  virtual inline int MinBottomBlobs() const { return 1; }
  virtual inline int MaxBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
 
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
 
 private:
  int scale_;
};
 
 
 
}  // namespace caffe
 
#endif  // CAFFE_UPSAMPLE_LAYER_HPP_

\src\caffe\layers\upsample_layer.cpp:

#include <vector>

#include "caffe/layers/upsample_layer.hpp"
#include "caffe/util/math_functions.hpp"  // caffe_set
 
namespace caffe {
 
template <typename Dtype>
void UpsampleLayer<Dtype>::LayerSetUp(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  UpsampleParameter upsample_param = this->layer_param_.upsample_param();
  scale_ = upsample_param.scale();
}
 
template <typename Dtype>
void UpsampleLayer<Dtype>::Reshape(
  const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  vector<int> out_shape;
  for (int i = 0; i < bottom[0]->num_axes(); i++) {
    out_shape.push_back(bottom[0]->shape(i));
  }
 
  out_shape[bottom[0]->num_axes() - 1] *= scale_;
  out_shape[bottom[0]->num_axes() - 2] *= scale_;
  top[0]->Reshape(out_shape);
}
 
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
 
  // N, C, H, W are the top (upsampled) blob's dimensions.
  int N = top[0]->shape(0);
  int C = top[0]->shape(1);
  int H = top[0]->shape(2);
  int W = top[0]->shape(3);
 
  const Dtype *input = bottom[0]->cpu_data();
  Dtype *output = top[0]->mutable_cpu_data();
  for (int n = 0; n < N; n++) {
    for (int c = 0; c < C; c++) {
      for (int h = 0; h < H; h++) {
        for (int w = 0; w < W; w++) {
          // Nearest-neighbour: copy the bottom pixel this top pixel maps onto.
          int nw = w / scale_;
          int nh = h / scale_;
          int out_idx = (((n * C + c) * H) + h) * W + w;
          int in_idx = (((n * C + c) * (H / scale_)) + nh) * (W / scale_) + nw;
          output[out_idx] = input[in_idx];
        }
      }
    }
  }
}
 
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  // Here N, C, H, W are the bottom blob's dimensions; each bottom element
  // accumulates the diffs of the scale_ x scale_ top block it produced.
  int N = bottom[0]->shape(0);
  int C = bottom[0]->shape(1);
  int H = bottom[0]->shape(2);
  int W = bottom[0]->shape(3);
  const Dtype *output_grad = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
  for (int n = 0; n < N; n++) {
    for (int c = 0; c < C; c++) {
      for (int h = 0; h < H; h++) {
        for (int w = 0; w < W; w++) {
          for (int i = 0; i < scale_; i++) {
            for (int j = 0; j < scale_; j++) {
              int nw = w * scale_ + i;
              int nh = h * scale_ + j;
              // Despite the names, out_idx indexes the bottom diff and
              // in_idx the top diff.
              int out_idx = (((n * C + c) * H) + h) * W + w;
              int in_idx = (((n * C + c) * (H * scale_))
                  + nh) * (W * scale_) + nw;
              bottom_diff[out_idx] += output_grad[in_idx];
            }
          }
        }
      }
    }
  }
}
 
#ifdef CPU_ONLY
STUB_GPU(UpsampleLayer);
#endif
 
INSTANTIATE_CLASS(UpsampleLayer);
REGISTER_LAYER_CLASS(Upsample);
 
}  // namespace caffe

\src\caffe\layers\upsample_layer.cu

#include <vector>
 
#include "caffe/filler.hpp"
#include "caffe/layers/upsample_layer.hpp"
#include "caffe/util/math_functions.hpp"
 
namespace caffe {
 
// Map a flat index in the top (upsampled) blob to the flat index of the
// bottom pixel it copies from; d1, d2, d3 are the top blob's C, H, W.
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
  int x, y, z, w;
  w = ii % d3;
  ii = ii/d3;
  z = ii % d2;
  ii = ii/d2;
  y = ii % d1;
  ii = ii/d1;
  x = ii;
  w = w/scale_factor;
  z = z/scale_factor;
  d2 /= scale_factor;
  d3 /= scale_factor;
  return (((x*d1+y)*d2)+z)*d3+w;
}
 
// Inverse map for the backward pass: from a flat bottom index to the flat
// top index at offset (off_x, off_y) inside the corresponding
// scale_factor x scale_factor block; d1, d2, d3 are the bottom blob's C, H, W.
__device__ int translate_idx_inv(
    int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) {
  int x, y, z, w;
  w = ii % d3;
  ii = ii/d3;
  z = ii % d2;
  ii = ii/d2;
  y = ii % d1;
  ii = ii/d1;
  x = ii;
  w = w*scale_factor+off_x;
  z = z*scale_factor+off_y;
  d2 *= scale_factor;
  d3 *= scale_factor;
  return (((x*d1+y)*d2)+z)*d3+w;
}
 
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
        int no_elements, int scale_factor, int d1, int d2, int d3) {
  int ii = threadIdx.x + blockDim.x * blockIdx.x;
  if (ii >= no_elements) return;
  int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
  output[ii]=input[ipidx];
}
 
template <typename Dtype>
__global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data,
                          int no_elements, int scale_factor, int d1, int d2,
                          int d3) {
  int ii = threadIdx.x + blockDim.x * blockIdx.x;
  if (ii >= no_elements) return;
  for (int i = 0; i < scale_factor; i++) {
    for (int j = 0; j < scale_factor; j++) {
      int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
      gradInput_data[ii] += gradOutput_data[ipidx];
    }
  }
}
 
 
 
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  int d1, d2, d3;
 
  d1 = top[0]->shape(1);
  d2 = top[0]->shape(2);
  d3 = top[0]->shape(3);
 
  int no_elements = top[0]->count();
 
  upscale<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>(
      bottom[0]->gpu_data(),
      top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3);
}
 
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  int d1, d2, d3;
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  d1 = bottom[0]->shape(1);
  d2 = bottom[0]->shape(2);
  d3 = bottom[0]->shape(3);
  int no_elements = bottom[0]->count();
  caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
  downscale<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>(
      bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3);
}
 
INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer);
 
}  // namespace caffe

\src\caffe\proto\caffe.proto:

Inside message LayerParameter { }, add a field ID for the upsample layer. It must not clash with any existing ID; mine is 152:

  // Parameter for upsample_layer.
  optional UpsampleParameter upsample_param = 152;

Below that, add the message defining the layer's parameter and its default value; this goes outside of message LayerParameter { }:


// message for upsample parameter 
message UpsampleParameter {
  optional int32 scale = 1 [default = 1];
}

Recompile Caffe; the build procedure was described in the previous post.
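After rebuilding, the new layer can be used in the network prototxt like any other. A sketch of one Keras-style up-block (the layer and blob names are made up):

layer {
  name: "upsample1"
  type: "Upsample"
  bottom: "conv5"
  top: "upsample1"
  upsample_param { scale: 2 }
}

followed by a 2x2 Convolution layer, which together mirror Keras's UpSampling2D + Conv2D block.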
