【深度學習】用caffe+ShuffleNet-V2做迴歸

重要參考來源

用caffe做迴歸(上)
用caffe做迴歸(下)

用到的github源碼

caffe
shuffle_channel_layer
conv_dw_layer

convert_imageset.cpp代碼詳解

功能:將數據集製作成lmdb或者leveldb數據
目前caffe原始的convert_imageset只支持單分類任務

// This program converts a set of images to a lmdb/leveldb by storing them
// as Datum proto buffers.
// Usage:
//   convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
//
// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE
// should be a list of files as well as their labels, in the format as
//   subfolder1/file1.JPEG 7
//   ....

#include <algorithm>
#include <fstream>  // NOLINT(readability/streams)
#include <string>
#include <utility>
#include <vector>

#include "boost/scoped_ptr.hpp"
#include "gflags/gflags.h"
#include "glog/logging.h"

#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"
#include "caffe/util/format.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/rng.hpp"

using namespace caffe;  // NOLINT(build/namespaces)
using std::pair;
using boost::scoped_ptr;

DEFINE_bool(gray, false,
    "When this option is on, treat images as grayscale ones");
    // Bool flag: when true, images are read as grayscale; default false (BGR).
DEFINE_bool(shuffle, false,
    "Randomly shuffle the order of images and their labels");
    // Bool flag: when true, the dataset is randomly shuffled; default false.
DEFINE_string(backend, "lmdb",
        "The backend {lmdb, leveldb} for storing the result");
        // String flag: storage backend, "lmdb" or "leveldb"; default "lmdb".
DEFINE_int32(resize_width, 0, "Width images are resized to");
// Int flag: target width for resizing; default 0 (keep original width).
DEFINE_int32(resize_height, 0, "Height images are resized to");
// Int flag: target height for resizing; default 0 (keep original height).
DEFINE_bool(check_size, false,
    "When this option is on, check that all the datum have the same size");
    // Bool flag: when true, verify every datum has the same size; default false.
DEFINE_bool(encoded, false,
    "When this option is on, the encoded image will be save in datum");
    // Bool flag: when true, the encoded image bytes are stored in the datum.
DEFINE_string(encode_type, "",
    "Optional: What type should we encode the image as ('png','jpg',...).");
    // String flag: encoding format such as "png" or "jpg"; default "" (none).

int main(int argc, char** argv) {
  // argc: number of command-line arguments passed to the program.
  // argv: array of pointers to the argument strings.
#ifdef USE_OPENCV
  ::google::InitGoogleLogging(argv[0]);
  // Initialize glog.
  FLAGS_alsologtostderr = 1;
  // Also print log messages of every severity to stderr,
  // regardless of FLAGS_stderrthreshold.

#ifndef GFLAGS_GFLAGS_H_
  namespace gflags = google;
  // Namespace alias for older gflags versions.
#endif

  gflags::SetUsageMessage("Convert a set of images to the leveldb/lmdb\n"
        "format used as input for Caffe.\n"
        "Usage:\n"
        "    convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME\n"
        "The ImageNet dataset for the training demo is at\n"
        "    http://www.image-net.org/download-images\n");
        // Usage text shown on argument errors or with --help.
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  // Parse the command-line flags defined above.

  if (argc < 4) {
  // Fewer than 4 arguments: print usage and exit with error code 1.
    gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/convert_imageset");
    return 1;
  }

  const bool is_color = !FLAGS_gray;
  // True when images are loaded as BGR; false for grayscale.
  const bool check_size = FLAGS_check_size;
  // True when every datum's size must be checked for consistency.
  const bool encoded = FLAGS_encoded;
  // True when the encoded image bytes should be stored in the datum.
  const string encode_type = FLAGS_encode_type;
  // "png", "jpg", ... or "" when no encoding was requested.

  std::ifstream infile(argv[2]);
  // argv[2] is the list file: one "image_path label" entry per line.
  std::vector<std::pair<std::string, int> > lines;
  // (image path, label) pairs, one per input line.
  std::string line;
  // Current line of the list file.
  size_t pos;
  // Position of the last space (separates path from label).
  int label;
  // Integer class label parsed from the line.
  while (std::getline(infile, line)) {
    pos = line.find_last_of(' ');
    // Locate the last space on the line.
    label = atoi(line.substr(pos + 1).c_str());
    // Everything after the last space is the integer label.
    lines.push_back(std::make_pair(line.substr(0, pos), label));
    // Store (path, label) for later processing.
  }
  if (FLAGS_shuffle) {
    LOG(INFO) << "Shuffling data";
    shuffle(lines.begin(), lines.end());
    // Randomly reorder the dataset when requested.
  }
  LOG(INFO) << "A total of " << lines.size() << " images.";
  // Report the total image count.

  if (encode_type.size() && !encoded)
  // encode_type given but --encoded=false:
    LOG(INFO) << "encode_type specified, assuming encoded=true.";
    // The code below behaves as if encoded were true whenever an
    // encode_type was supplied, so only a notice is logged here.

  int resize_height = std::max<int>(0, FLAGS_resize_height);
  int resize_width = std::max<int>(0, FLAGS_resize_width);
  // Negative resize values are clamped to 0 (0 keeps the original size).

  scoped_ptr<db::DB> db(db::GetDB(FLAGS_backend));
  db->Open(argv[3], db::NEW);
  scoped_ptr<db::Transaction> txn(db->NewTransaction());
  // Create a new lmdb/leveldb database at argv[3].

  std::string root_folder(argv[1]);
  // Root folder prepended to every image path from the list file.
  Datum datum;
  int count = 0;
  int data_size = 0;
  bool data_size_initialized = false;

  for (int line_id = 0; line_id < lines.size(); ++line_id) {
  // Process every (path, label) entry.
    bool status;
    std::string enc = encode_type;
    // enc is the encoding format: "png", "jpg", or "".
    if (encoded && !enc.size()) {
      // encoded is true but no explicit type: guess it from the extension.
      string fn = lines[line_id].first;
      // fn is the image path.
      size_t p = fn.rfind('.');
      // Locate the last '.'.
      if ( p == fn.npos )
        LOG(WARNING) << "Failed to guess the encoding of '" << fn << "'";
      enc = fn.substr(p + 1);
      // Take the extension after the dot (p + 1, matching upstream caffe).
      // BUGFIX: fn.substr(p) kept the leading dot, so matchExt() could never
      // match and imencode got a ".."-prefixed extension; worse, when
      // p == npos, substr(p) throws std::out_of_range, while substr(p + 1)
      // wraps to substr(0) as upstream intends.
      std::transform(enc.begin(), enc.end(), enc.begin(), ::tolower);
      // Normalize the extension to lowercase.
    }
    status = ReadImageToDatum(root_folder + lines[line_id].first,
        lines[line_id].second, resize_height, resize_width, is_color,
        enc, &datum);
        // status is true on success, false when the image could not be read.
    if (status == false) continue;
    // Skip unreadable images.
    if (check_size) {
      // Verify all successfully read images have the same raw size.
      if (!data_size_initialized) {
        // The first successful image fixes the expected data size.
        data_size = datum.channels() * datum.height() * datum.width();
        data_size_initialized = true;
      } else {
        // Later images must match the size of the first one.
        const std::string& data = datum.data();
        CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
            << data.size();
      }
    }

    string key_str = caffe::format_int(line_id, 8) + "_" + lines[line_id].first;
    // Key: zero-padded line index + image path; keeps db ordering stable.

    string out;
    CHECK(datum.SerializeToString(&out));
    txn->Put(key_str, out);
    // Serialize the datum and stage it in the transaction.

    if (++count % 1000 == 0) {
      // Commit every 1000 items.
      txn->Commit();
      txn.reset(db->NewTransaction());
      LOG(INFO) << "Processed " << count << " files.";
    }
  }
  // write the last batch
  if (count % 1000 != 0) {
    // Commit the final partial batch of fewer than 1000 items.
    txn->Commit();
    LOG(INFO) << "Processed " << count << " files.";
  }
#else
  LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV.";
#endif  // USE_OPENCV
  return 0;
}

ReadImageToDatum等相關函數代碼詳解

功能:讀取圖片,並將圖片和label讀入Datum

#ifdef USE_OPENCV
// Read an image from disk into a cv::Mat, optionally resizing it.
// Returns an empty Mat (logged as an error) when the file cannot be read.
cv::Mat ReadImageToCVMat(const string& filename,
    const int height, const int width, const bool is_color) {
  cv::Mat cv_img;
  // Choose the imread mode from the requested color handling.
  int cv_read_flag;
  if (is_color) {
    cv_read_flag = CV_LOAD_IMAGE_COLOR;
  } else {
    cv_read_flag = CV_LOAD_IMAGE_GRAYSCALE;
  }
  cv::Mat cv_img_origin = cv::imread(filename, cv_read_flag);
  if (!cv_img_origin.data) {
    // Decode failed: report it and hand back the empty Mat.
    LOG(ERROR) << "Could not open or find file " << filename;
    return cv_img_origin;
  }
  const bool do_resize = (height > 0 && width > 0);
  if (do_resize) {
    // Both target dimensions were given: scale to width x height.
    cv::resize(cv_img_origin, cv_img, cv::Size(width, height));
  } else {
    // Otherwise keep the original resolution.
    cv_img = cv_img_origin;
  }
  return cv_img;
}

// Overload: read in color (is_color defaults to true).
cv::Mat ReadImageToCVMat(const string& filename,
    const int height, const int width) {
  return ReadImageToCVMat(filename, height, width, true);
}
// is_color默認是True

// Overload: keep the original image size (height = width = 0).
cv::Mat ReadImageToCVMat(const string& filename,
    const bool is_color) {
  return ReadImageToCVMat(filename, 0, 0, is_color);
}
// height和width默認是0,也就是保留原圖尺寸

// Overload: color image at its original size.
cv::Mat ReadImageToCVMat(const string& filename) {
  return ReadImageToCVMat(filename, 0, 0, true);
}

// Return true when filename fn's extension matches encoding en,
// case-insensitively; "jpg" and "jpeg" are treated as equivalent.
// When fn has no '.', the whole name is compared instead.
static bool matchExt(const std::string & fn,
                     std::string en) {
  const size_t dot = fn.rfind('.');
  std::string ext = (dot != fn.npos) ? fn.substr(dot + 1) : fn;
  // Lowercase both sides so the comparison is case-insensitive.
  std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
  std::transform(en.begin(), en.end(), en.begin(), ::tolower);
  return ext == en || (en == "jpg" && ext == "jpeg");
}

// Read an image into datum (either its encoded bytes or its raw pixels)
// and set the integer classification label. Returns false when the image
// cannot be read.
//   filename: full path to the image
//   label:    integer class label
//   height/width: resize targets (0 keeps the original size)
//   is_color: true = BGR, false = grayscale
//   encoding: "png", "jpg", ... or "" for raw pixel storage
bool ReadImageToDatum(const string& filename, const int label,
    const int height, const int width, const bool is_color,
    const std::string & encoding, Datum* datum) {
  cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color);
  // Decode (and optionally resize) the image first.
  if (cv_img.data) {
    if (encoding.size()) {
      // An encoding format was requested.
      if ( (cv_img.channels() == 3) == is_color && !height && !width &&
          matchExt(filename, encoding) )
          // The file on disk already has the requested color mode and
          // encoding, and no resize was asked for: copy its bytes as-is.
        return ReadFileToDatum(filename, label, datum);
      std::vector<uchar> buf;
      cv::imencode("."+encoding, cv_img, buf);
      // Otherwise re-encode the decoded cv::Mat into a byte stream.
      datum->set_data(std::string(reinterpret_cast<char*>(&buf[0]),
                      buf.size()));
      datum->set_label(label);
      datum->set_encoded(true);
      return true;
      // Encoded payload stored successfully.
    }
    // encoding == "": store raw pixels via CVMatToDatum.
    CVMatToDatum(cv_img, datum);
    datum->set_label(label);
    return true;
    // Raw payload stored successfully.
  } else {
    return false;
    // Image could not be read.
  }
}
#endif  // USE_OPENCV

// Copy a file's raw bytes into datum (marked as encoded) and set the label.
// Returns false when the file cannot be opened.
bool ReadFileToDatum(const string& filename, const int label,
    Datum* datum) {
  std::streampos size;

  fstream file(filename.c_str(), ios::in|ios::binary|ios::ate);
  // Open for binary READING, positioned at the end (ios::ate).
  if (file.is_open()) {
    size = file.tellg();
    // The position at the end equals the file size in bytes.
    std::string buffer(size, ' ');
    file.seekg(0, ios::beg);
    // Rewind to the beginning of the file before reading.
    file.read(&buffer[0], size);
    file.close();
    datum->set_data(buffer);
    datum->set_label(label);
    datum->set_encoded(true);
    return true;
  } else {
    return false;
    // File could not be opened.
  }
}

// Copy raw 8-bit pixels from cv_img into datum, converting OpenCV's
// interleaved HxWxC layout into Caffe's planar CxHxW layout.
void CVMatToDatum(const cv::Mat& cv_img, Datum* datum) {
  CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte";
  // Record the geometry and mark the payload as raw (not encoded).
  datum->set_channels(cv_img.channels());
  datum->set_height(cv_img.rows);
  datum->set_width(cv_img.cols);
  datum->clear_data();
  datum->clear_float_data();
  datum->set_encoded(false);
  const int channels = datum->channels();
  const int rows = datum->height();
  const int cols = datum->width();
  std::string buffer(channels * rows * cols, ' ');
  // Walk each interleaved source row once, scattering bytes into the
  // planar destination.
  for (int h = 0; h < rows; ++h) {
    const uchar* row_ptr = cv_img.ptr<uchar>(h);
    int src = 0;
    for (int w = 0; w < cols; ++w) {
      for (int c = 0; c < channels; ++c) {
        // Destination index in channel-major (planar) order.
        const int dst = (c * rows + h) * cols + w;
        buffer[dst] = static_cast<char>(row_ptr[src++]);
      }
    }
  }
  datum->set_data(buffer);
}

convert_imageset.cpp修改思路

因爲迴歸任務迴歸幾個浮點值,所以文件應該由原來的格式:圖片路徑[空格]label,變爲:圖片路徑[空格]浮點數1[空格]浮點數2[空格]浮點數3[空格]浮點數4[空格]…
所以處理文件以及將label(這裏是迴歸的浮點數)讀入Datum部分要做相應修改

// This program converts a set of images to a lmdb/leveldb by storing them
// as Datum proto buffers.
// Usage:
//   convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME
//
// where ROOTFOLDER is the root folder that holds all the images, and LISTFILE
// should be a list of files as well as their labels, in the format as
//   subfolder1/file1.JPEG 7
//   ....

#include <algorithm>
#include <fstream>  // NOLINT(readability/streams)
#include <string>
#include <utility>
#include <vector>

#include "boost/scoped_ptr.hpp"
#include "gflags/gflags.h"
#include "glog/logging.h"

#include "caffe/proto/caffe.pb.h"
#include "caffe/util/db.hpp"
#include "caffe/util/format.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/rng.hpp"

#include <boost/tokenizer.hpp>
// 使用tokenizer

using namespace caffe;  // NOLINT(build/namespaces)
using std::pair;
using boost::scoped_ptr;

// Command-line flags (unchanged from the original convert_imageset tool;
// the regression version only changes how the label columns are parsed).
DEFINE_bool(gray, false,
    "When this option is on, treat images as grayscale ones");
DEFINE_bool(shuffle, false,
    "Randomly shuffle the order of images and their labels");
DEFINE_string(backend, "lmdb",
        "The backend {lmdb, leveldb} for storing the result");
DEFINE_int32(resize_width, 0, "Width images are resized to");
DEFINE_int32(resize_height, 0, "Height images are resized to");
DEFINE_bool(check_size, false,
    "When this option is on, check that all the datum have the same size");
DEFINE_bool(encoded, false,
    "When this option is on, the encoded image will be save in datum");
DEFINE_string(encode_type, "",
    "Optional: What type should we encode the image as ('png','jpg',...).");

int main(int argc, char** argv) {
#ifdef USE_OPENCV
  ::google::InitGoogleLogging(argv[0]);
  // Print output to stderr (while still logging)
  FLAGS_alsologtostderr = 1;

#ifndef GFLAGS_GFLAGS_H_
  namespace gflags = google;
#endif

  gflags::SetUsageMessage("Convert a set of images to the leveldb/lmdb\n"
        "format used as input for Caffe.\n"
        "Usage:\n"
        "    convert_imageset [FLAGS] ROOTFOLDER/ LISTFILE DB_NAME\n"
        "The ImageNet dataset for the training demo is at\n"
        "    http://www.image-net.org/download-images\n");
  gflags::ParseCommandLineFlags(&argc, &argv, true);

  if (argc < 4) {
    gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/convert_imageset");
    return 1;
  }

  const bool is_color = !FLAGS_gray;
  const bool check_size = FLAGS_check_size;
  const bool encoded = FLAGS_encoded;
  const string encode_type = FLAGS_encode_type;

  std::ifstream infile(argv[2]);
  // Regression version: each list-file line is
  //   image_path f1 f2 f3 f4 ...
  // so an image path is paired with a vector of float regression targets.
  std::vector<std::pair<std::string, std::vector<float> > > lines;
  std::string line;
  while (std::getline(infile, line)) {
    // Split the line on spaces; the tokenizer skips empty tokens.
    boost::char_separator<char> sep(" ");
    boost::tokenizer<boost::char_separator<char> > tok(line, sep);
    std::vector<std::string> tokens(tok.begin(), tok.end());
    if (tokens.empty()) {
      // ROBUSTNESS: a blank (or whitespace-only) line used to crash on
      // tokens.at(0); skip it instead.
      continue;
    }
    // tokens[0] is the image path; every following token is one float label.
    std::vector<float> labels;
    for (int i = 1; i < tokens.size(); ++i) {
      labels.push_back(atof(tokens.at(i).c_str()));
    }
    lines.push_back(std::make_pair(tokens.at(0), labels));
    // labels is scoped inside the loop, so no explicit clear() is needed.
  }
  if (FLAGS_shuffle) {
    // randomly shuffle data
    LOG(INFO) << "Shuffling data";
    shuffle(lines.begin(), lines.end());
  }
  LOG(INFO) << "A total of " << lines.size() << " images.";

  if (encode_type.size() && !encoded)
    LOG(INFO) << "encode_type specified, assuming encoded=true.";

  int resize_height = std::max<int>(0, FLAGS_resize_height);
  int resize_width = std::max<int>(0, FLAGS_resize_width);

  // Create new DB
  scoped_ptr<db::DB> db(db::GetDB(FLAGS_backend));
  db->Open(argv[3], db::NEW);
  scoped_ptr<db::Transaction> txn(db->NewTransaction());

  // Storing to db
  std::string root_folder(argv[1]);
  Datum datum;
  int count = 0;
  int data_size = 0;
  bool data_size_initialized = false;

  for (int line_id = 0; line_id < lines.size(); ++line_id) {
    bool status;
    std::string enc = encode_type;
    if (encoded && !enc.size()) {
      // Guess the encoding type from the file name
      string fn = lines[line_id].first;
      size_t p = fn.rfind('.');
      if ( p == fn.npos )
        LOG(WARNING) << "Failed to guess the encoding of '" << fn << "'";
      enc = fn.substr(p+1);
      std::transform(enc.begin(), enc.end(), enc.begin(), ::tolower);
    }
    // The second argument is now the float-label vector, handled by the
    // ReadImageToDatum overload added in io.cpp for regression.
    status = ReadImageToDatum(root_folder + lines[line_id].first,
        lines[line_id].second, resize_height, resize_width, is_color,
        enc, &datum);
    if (status == false) continue;
    if (check_size) {
      if (!data_size_initialized) {
        data_size = datum.channels() * datum.height() * datum.width();
        data_size_initialized = true;
      } else {
        const std::string& data = datum.data();
        CHECK_EQ(data.size(), data_size) << "Incorrect data field size "
            << data.size();
      }
    }
    // sequential
    string key_str = caffe::format_int(line_id, 8) + "_" + lines[line_id].first;

    // Put in db
    string out;
    CHECK(datum.SerializeToString(&out));
    txn->Put(key_str, out);

    if (++count % 1000 == 0) {
      // Commit db
      txn->Commit();
      txn.reset(db->NewTransaction());
      LOG(INFO) << "Processed " << count << " files.";
    }
  }
  // write the last batch
  if (count % 1000 != 0) {
    txn->Commit();
    LOG(INFO) << "Processed " << count << " files.";
  }
#else
  LOG(FATAL) << "This tool requires OpenCV; compile with USE_OPENCV.";
#endif  // USE_OPENCV
  return 0;
}

ReadImageToDatum等相關函數修改思路

ReadImageToDatum函數的第二個參數從一個整型的label變成了一個浮點型的vector
所以,io.hpp中相關的形參類型說明也要修改

可以不改動原來的函數聲明(因爲C++支持函數重載,這裏指參數有所不同),而在它的下面接上:

bool ReadImageToDatum(const string& filename, const vector<float> labels,
    const int height, const int width, const bool is_color,
    const std::string & encoding, Datum* datum);

在bool ReadFileToDatum函數聲明下面添加:

bool ReadFileToDatum(const string& filename, const vector<float> labels, Datum* datum);

io.cpp中在ReadImageToDatum函數下面進行添加:

// Regression overload: store the image in datum and write the float label
// vector into datum's repeated float_data field (instead of the int label).
bool ReadImageToDatum(const string& filename, const vector<float> labels,
    const int height, const int width, const bool is_color,
    const std::string & encoding, Datum* datum) {
  cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color);
  if (cv_img.data) {
    if (encoding.size()) {
      if ( (cv_img.channels() == 3) == is_color && !height && !width &&
          matchExt(filename, encoding) )
        // File already matches the requested encoding: copy its bytes.
        return ReadFileToDatum(filename, labels, datum);
      std::vector<uchar> buf;
      cv::imencode("."+encoding, cv_img, buf);
      datum->set_data(std::string(reinterpret_cast<char*>(&buf[0]),
                      buf.size()));
      // BUGFIX: the converter reuses one Datum for every image, and
      // set_data() does not touch float_data, so the labels of previous
      // images would accumulate. Drop stale labels before appending.
      datum->clear_float_data();
      for (int i = 0; i < labels.size(); ++i)
      {
        datum->add_float_data(labels.at(i));
      }
      datum->set_encoded(true);
      return true;
    }
    // Raw-pixel path: CVMatToDatum clears data/float_data itself.
    CVMatToDatum(cv_img, datum);
    for (int i = 0; i < labels.size(); ++i)
    {
      datum->add_float_data(labels.at(i));
    }
    return true;
  } else {
    return false;
  }
}

io.cpp中在ReadFileToDatum函數下面進行添加:

// Regression overload: copy the file's raw bytes into datum (marked as
// encoded) and store the float label vector in float_data.
bool ReadFileToDatum(const string& filename, const vector<float> labels,
    Datum* datum) {
  std::streampos size;

  fstream file(filename.c_str(), ios::in|ios::binary|ios::ate);
  // Open for binary reading, positioned at the end to learn the size.
  if (file.is_open()) {
    size = file.tellg();
    std::string buffer(size, ' ');
    file.seekg(0, ios::beg);
    file.read(&buffer[0], size);
    file.close();
    datum->set_data(buffer);
    // BUGFIX: the converter reuses a single Datum across images and
    // set_data() leaves float_data untouched, so labels of previous images
    // would accumulate. Clear them before appending this image's labels.
    datum->clear_float_data();
    for (int i = 0; i < labels.size(); ++i)
    {
      datum->add_float_data(labels.at(i));
    }
    datum->set_encoded(true);
    return true;
  } else {
    return false;
  }
}

下面內容轉載自用caffe做迴歸(上)

datum->add_float_data(labels.at(i));

這個函數是怎麼來的,第一次用的時候怎麼會知道有這個函數?
這就得來看看caffe.proto文件了,裏面關於Datum的代碼如下:

message Datum {
  optional int32 channels = 1;
  optional int32 height = 2;
  optional int32 width = 3;
  // the actual image data, in bytes
  optional bytes data = 4;
  optional int32 label = 5;
  // Optionally, the datum could also hold float data.
  repeated float float_data = 6;
  // If true data contains an encoded image that need to be decoded
  optional bool encoded = 7 [default = false];
}

.proto文件是Google開發的一種協議接口,根據這個,可以自動生成caffe.pb.h和caffe.pb.cc文件

==

optional int32 label = 5;

這條就是用於做分類的

==

repeated float float_data = 6;

這條就是用來做迴歸的

==

在caffe.pb.h文件中可以找到關於做迴歸的這部分自動生成的代碼:

  // repeated float float_data = 6;
  int float_data_size() const;
  void clear_float_data();
  static const int kFloatDataFieldNumber = 6;
  float float_data(int index) const;
  void set_float_data(int index, float value);
  void add_float_data(float value);
  const ::google::protobuf::RepeatedField< float >&
      float_data() const;
  ::google::protobuf::RepeatedField< float >*
      mutable_float_data();

data_layer.cpp代碼及修改思路

功能:從lmdb或leveldb中讀取圖片信息,先是反序列化成Datum,然後再放進Blob中

#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#endif  // USE_OPENCV
#include <stdint.h>

#include <vector>

#include "caffe/data_transformer.hpp"
#include "caffe/layers/data_layer.hpp"
#include "caffe/util/benchmark.hpp"

namespace caffe {

// Open the backing lmdb/leveldb in read mode and position a cursor at the
// first record.
template <typename Dtype>
DataLayer<Dtype>::DataLayer(const LayerParameter& param)
  : BasePrefetchingDataLayer<Dtype>(param),
    offset_() {
  db_.reset(db::GetDB(param.data_param().backend()));
  db_->Open(param.data_param().source(), db::READ);
  cursor_.reset(db_->NewCursor());
}

// Stop the prefetch thread before the layer is destroyed.
template <typename Dtype>
DataLayer<Dtype>::~DataLayer() {
  this->StopInternalThread();
}

// Shape the data and label blobs from the first datum in the DB.
// Regression version: top[1] becomes (batch_size, labelNum, 1, 1) instead
// of the single-label (batch_size) shape used for classification.
template <typename Dtype>
void DataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const int batch_size = this->layer_param_.data_param().batch_size();
  // Read a data point, and use it to initialize the top blob.
  Datum datum;
  datum.ParseFromString(cursor_->value());

  // Use data_transformer to infer the expected blob shape from datum.
  vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
  this->transformed_data_.Reshape(top_shape);
  // Reshape top[0] and prefetch_data according to the batch_size.
  top_shape[0] = batch_size;
  top[0]->Reshape(top_shape);
  for (int i = 0; i < this->prefetch_.size(); ++i) {
    this->prefetch_[i]->data_.Reshape(top_shape);
  }
  LOG_IF(INFO, Caffe::root_solver())
      << "output data size: " << top[0]->num() << ","
      << top[0]->channels() << "," << top[0]->height() << ","
      << top[0]->width();
  // Original single-label (classification) shaping, kept for reference:
/*
  if (this->output_labels_) {
    vector<int> label_shape(1, batch_size);
    top[1]->Reshape(label_shape);
    for (int i = 0; i < this->prefetch_.size(); ++i) {
      this->prefetch_[i]->label_.Reshape(label_shape);
    }
  }
  */

  // Number of regression targets per image. GENERALIZED: read it from the
  // first datum's float_data instead of hard-coding 4, so the layer works
  // with any label count written by the converter. (Requires a regression
  // lmdb whose datums carry their labels in float_data.)
  int labelNum = datum.float_data_size();
  if (this->output_labels_) {

    vector<int> label_shape;
    label_shape.push_back(batch_size);
    label_shape.push_back(labelNum);
    label_shape.push_back(1);
    label_shape.push_back(1);
    // The four values map to the Blob's num, channels, height, width:
    // num = batch_size, channels = labelNum, height = width = 1.
    top[1]->Reshape(label_shape);
    for (int i = 0; i < this->prefetch_.size(); ++i) {
      this->prefetch_[i]->label_.Reshape(label_shape);
    }
  }
}

// Decide whether the current record belongs to another solver and should
// be skipped (multi-GPU round-robin sharding of the database).
template <typename Dtype>
bool DataLayer<Dtype>::Skip() {
  // In TEST phase only rank 0 runs, so never skip there.
  if (this->layer_param_.phase() == TEST) {
    return false;
  }
  // Round-robin: this solver keeps every solver_count-th record,
  // offset by its own rank.
  const int size = Caffe::solver_count();
  const int rank = Caffe::solver_rank();
  return (offset_ % size) != rank;
}

// Advance the DB cursor by one record, wrapping around to the first record
// when the end of the database is reached.
template<typename Dtype>
void DataLayer<Dtype>::Next() {
  cursor_->Next();
  if (!cursor_->valid()) {
    LOG_IF(INFO, Caffe::root_solver())
        << "Restarting data prefetching from start.";
    cursor_->SeekToFirst();
  }
  offset_++;
  // Count consumed records; Skip() uses this for multi-solver round-robin.
}

// This function is called on prefetch thread
// Fill one Batch with batch_size transformed images and their float
// regression labels read from the database.
template<typename Dtype>
void DataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
  CPUTimer batch_timer;
  batch_timer.Start();
  double read_time = 0;
  double trans_time = 0;
  CPUTimer timer;
  CHECK(batch->data_.count());
  CHECK(this->transformed_data_.count());
  const int batch_size = this->layer_param_.data_param().batch_size();

  Datum datum;
  for (int item_id = 0; item_id < batch_size; ++item_id) {
    timer.Start();
    while (Skip()) {
      Next();
    }
    datum.ParseFromString(cursor_->value());
    read_time += timer.MicroSeconds();

    if (item_id == 0) {
      // Reshape according to the first datum of each batch
      // on single input batches allows for inputs of varying dimension.
      // Use data_transformer to infer the expected blob shape from datum.
      vector<int> top_shape = this->data_transformer_->InferBlobShape(datum);
      this->transformed_data_.Reshape(top_shape);
      // Reshape batch according to the batch_size.
      top_shape[0] = batch_size;
      batch->data_.Reshape(top_shape);
    }

    // Apply data transformations (mirror, scale, crop...)
    timer.Start();
    int offset = batch->data_.offset(item_id);
    Dtype* top_data = batch->data_.mutable_cpu_data();
    this->transformed_data_.set_cpu_data(top_data + offset);
    this->data_transformer_->Transform(datum, &(this->transformed_data_));
    // Copy the float regression labels.
    // Original single-label code, kept for reference:
    /*
    if (this->output_labels_) {
      Dtype* top_label = batch->label_.mutable_cpu_data();
      top_label[item_id] = datum.label();
    }
    */
    if (this->output_labels_) {
      // Labels per image, derived from the label blob shaped in
      // DataLayerSetUp (num = batch_size, channels = labelNum) instead of
      // being hard-coded to 4.
      const int labelNum = batch->label_.count() / batch_size;
      Dtype* top_label = batch->label_.mutable_cpu_data();
      for (int i = 0; i < labelNum; ++i) {
        // BUGFIX: ParseFromString resets the message, so each datum carries
        // only its own labelNum floats -- index with i alone. The original
        // datum.float_data(i + labelNum * item_id) read past the end of
        // float_data for every item_id > 0 (it only "worked" at
        // batch_size = 1).
        top_label[item_id * labelNum + i] = datum.float_data(i);
      }
    }

    trans_time += timer.MicroSeconds();
    Next();
  }
  timer.Stop();
  batch_timer.Stop();
  DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms.";
  DLOG(INFO) << "     Read time: " << read_time / 1000 << " ms.";
  DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms.";
}

INSTANTIATE_CLASS(DataLayer);
REGISTER_LAYER_CLASS(Data);

}  // namespace caffe

修改項目清單

  1. 修改convert_imageset.cpp文件
  2. 修改io.hpp和io.cpp文件
  3. 修改data_layer.cpp文件
  4. 添加conv_dw_layer.cpp和conv_dw_layer.cu文件到/caffe_root/src/caffe/layers
  5. 添加conv_dw_layer.hpp文件到/caffe_root/include/caffe/layers
  6. 添加shuffle_channel_layer.cpp和shuffle_channel_layer.cu文件到/caffe_root/src/caffe/layers
  7. 添加shuffle_channel_layer.hpp文件到/caffe_root/include/caffe/layers
  8. caffe.proto中添加:
message LayerParameter {
...
optional ShuffleChannelParameter shuffle_channel_param = 164;
...
}
...
message ShuffleChannelParameter {
  optional uint32 group = 1[default = 1]; // The number of group
}

生成lmdb數據

按下面格式製作文件:

圖片名1 label1_1 label1_2 label1_3 label1_4
圖片名2 label2_1 label2_2 label2_3 label2_4
圖片名3 label3_1 label3_2 label3_3 label3_4

運行編譯生成的convert_imageset可執行文件:

./convert_imageset --gray=false --shuffle=true --backend=lmdb --resize_width=0 --resize_height=0 --check_size=false --encoded=true --encode_type=jpg 圖片根路徑 上一步製作的文件 希望保存生成lmdb文件的路徑

要注意在源碼中,生成完整圖片路徑用的是簡單的"+"。所以,圖片根路徑最後和製作的文件中圖片名最前,有且僅有一個"/",否則會報錯找不到圖片
成功生成lmdb格式的數據:
在這裏插入圖片描述

Data層和Loss層參數說明

layer {
  name:"data"
  type:"Data"
  top:"data"
  top:"label"
  include {
    phase: TRAIN
  }
  transform_param {
    mirror: true
    # Randomly mirror input images; default is false (no mirroring).
    crop_size: 0
    # Randomly crop images to this size; 0 (default) disables cropping.
    # NOTE: prototxt (protobuf text format) comments must start with '#';
    # the original '//' comments would make this file unparseable.
    mean_file: "做減均值處理的均值文件,mean.binaryproto"
  }
  data_param {
    source: "lmdb數據路徑"
    batch_size: 1
    backend: LMDB
  }
}

生成.binaryproto文件的方法:
用編譯caffe生成的compute_image_mean可執行文件

./compute_image_mean --backend=lmdb lmdb文件的路徑 輸出文件.binaryproto

在這裏插入圖片描述

# Euclidean (L2) loss between the network's fc4 output and the float labels.
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "fc4"  
  bottom: "label" 
  top: "loss" 
}

Gconv和DWconv運算過程詳解

Gconv(組卷積)和DWconv(深度可分離卷積)是ShuffleNet-V2的核心組件。這裏用博主手繪圖詳解這兩種卷積進行的操作:
在這裏插入圖片描述
在這裏插入圖片描述

train.prototxt樣例

name: "shufflenet_v2"
layer {
  name:"data"
  type:"Data"
  top:"data"
  top:"label"
  include {
    phase: TRAIN
  }
  transform_param {
    mirror: true
    crop_size: 0
    mean_file: "train.binaryproto"
  }
  data_param {
    source: "lmdb"
    batch_size: 1
    backend: LMDB
  }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 24
    pad: 1
    kernel_size: 3
    stride: 2
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "conv1_bn"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "conv1_scale"
  bottom: "conv1"
  top: "conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "conv1_relu"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "branch1_1_conv1"
  type: "ConvolutionDepthwise"
  bottom: "pool1"
  top: "branch1_1_conv1"
  convolution_param {
    num_output: 24
    kernel_size: 3
    stride: 2
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch1_1_conv1_bn"
  type: "BatchNorm"
  bottom: "branch1_1_conv1"
  top: "branch1_1_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch1_1_conv1_scale"
  bottom: "branch1_1_conv1"
  top: "branch1_1_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch1_1_conv2"
  type: "Convolution"
  bottom: "branch1_1_conv1"
  top: "branch1_1_conv2"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch1_1_conv2_bn"
  type: "BatchNorm"
  bottom: "branch1_1_conv2"
  top: "branch1_1_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch1_1_conv2_scale"
  bottom: "branch1_1_conv2"
  top: "branch1_1_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch1_1_conv2_relu"
  type: "ReLU"
  bottom: "branch1_1_conv2"
  top: "branch1_1_conv2"
}
# --- ShuffleNet-V2 downsample unit 1, main branch (branch1_2) ---
# Pattern: 1x1 conv -> BN/Scale -> ReLU, 3x3 depthwise conv (stride 2)
# -> BN/Scale (deliberately NO ReLU after the depthwise conv, matching the
# ShuffleNet-V2 design used throughout this file), 1x1 conv -> BN/Scale -> ReLU.
# The branch output is concatenated with the shortcut branch (58 + 58 = 116
# channels) and then channel-shuffled with group = 2.
layer {
  name: "branch1_2_conv1"
  type: "Convolution"
  bottom: "pool1"
  top: "branch1_2_conv1"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch1_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch1_2_conv1"
  top: "branch1_2_conv1"
  # All three BN blobs frozen to gradient updates (running-average statistics).
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch1_2_conv1_scale"
  bottom: "branch1_2_conv1"
  top: "branch1_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch1_2_conv1_relu"
  type: "ReLU"
  bottom: "branch1_2_conv1"
  top: "branch1_2_conv1"
}
# 3x3 depthwise conv, stride 2: performs the spatial downsampling of this unit.
# ConvolutionDepthwise is the custom layer from the conv_dw_layer repo
# referenced in the article header.
layer {
  name: "branch1_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch1_2_conv1"
  top: "branch1_2_conv2"
  convolution_param {
    num_output: 58
    kernel_size: 3
    stride: 2
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch1_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch1_2_conv2"
  top: "branch1_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# Note: no ReLU after the depthwise conv — intentional (ShuffleNet-V2 style).
layer {
  name: "branch1_2_conv2_scale"
  bottom: "branch1_2_conv2"
  top: "branch1_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch1_2_conv3"
  type: "Convolution"
  bottom: "branch1_2_conv2"
  top: "branch1_2_conv3"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch1_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch1_2_conv3"
  top: "branch1_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch1_2_conv3_scale"
  bottom: "branch1_2_conv3"
  top: "branch1_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch1_2_conv3_relu"
  type: "ReLU"
  bottom: "branch1_2_conv3"
  top: "branch1_2_conv3"
}
# Merge shortcut (58 ch) and main (58 ch) branches along the channel axis.
layer {
  name: "concat1"
  type: "Concat"
  bottom: "branch1_1_conv2"
  bottom: "branch1_2_conv3"
  top: "concat1"
}
# Channel shuffle (custom ShuffleChannel layer) to mix information between
# the two branch groups.
layer {
  name: "shuffle1"
  type: "ShuffleChannel"
  bottom: "concat1"
  top: "shuffle1"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic (stride-1) unit 2 ---
# Channel split via Slice: branch2_1 (identity, 58 ch) and branch2_2 (58 ch)
# which goes through 1x1 conv -> DW 3x3 (stride 1) -> 1x1 conv, each followed
# by BN/Scale (ReLU after the 1x1 convs only). Concat restores 116 channels,
# then channel shuffle with group = 2.
layer {
  name: "slice2"
  type: "Slice"
  bottom: "shuffle1"
  top: "branch2_1"
  top: "branch2_2"
  slice_param {
    # Split the 116 input channels into two halves of 58.
    slice_point: 58
    axis: 1
  }
}
layer {
  name: "branch2_2_conv1"
  type: "Convolution"
  bottom: "branch2_2"
  top: "branch2_2_conv1"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch2_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch2_2_conv1"
  top: "branch2_2_conv1"
  # BN statistic blobs frozen to gradients (running averages).
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch2_2_conv1_scale"
  bottom: "branch2_2_conv1"
  top: "branch2_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch2_2_conv1_relu"
  type: "ReLU"
  bottom: "branch2_2_conv1"
  top: "branch2_2_conv1"
}
# Depthwise 3x3, stride 1 (no downsampling in basic units).
layer {
  name: "branch2_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch2_2_conv1"
  top: "branch2_2_conv2"
  convolution_param {
    num_output: 58
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch2_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch2_2_conv2"
  top: "branch2_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# No ReLU after the depthwise conv (ShuffleNet-V2 convention in this file).
layer {
  name: "branch2_2_conv2_scale"
  bottom: "branch2_2_conv2"
  top: "branch2_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch2_2_conv3"
  type: "Convolution"
  bottom: "branch2_2_conv2"
  top: "branch2_2_conv3"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch2_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch2_2_conv3"
  top: "branch2_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch2_2_conv3_scale"
  bottom: "branch2_2_conv3"
  top: "branch2_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch2_2_conv3_relu"
  type: "ReLU"
  bottom: "branch2_2_conv3"
  top: "branch2_2_conv3"
}
# Rejoin the untouched half with the processed half: 58 + 58 = 116 channels.
layer {
  name: "concat2"
  type: "Concat"
  bottom: "branch2_1"
  bottom: "branch2_2_conv3"
  top: "concat2"
}
layer {
  name: "shuffle2"
  type: "ShuffleChannel"
  bottom: "concat2"
  top: "shuffle2"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 3 (same structure as unit 2) ---
# Slice -> [identity | 1x1 -> DW 3x3 s1 -> 1x1, BN/Scale each, ReLU after
# the 1x1 convs only] -> Concat (58 + 58 = 116 ch) -> channel shuffle.
layer {
  name: "slice3"
  type: "Slice"
  bottom: "shuffle2"
  top: "branch3_1"
  top: "branch3_2"
  slice_param {
    slice_point: 58
    axis: 1
  }
}
layer {
  name: "branch3_2_conv1"
  type: "Convolution"
  bottom: "branch3_2"
  top: "branch3_2_conv1"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch3_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch3_2_conv1"
  top: "branch3_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch3_2_conv1_scale"
  bottom: "branch3_2_conv1"
  top: "branch3_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch3_2_conv1_relu"
  type: "ReLU"
  bottom: "branch3_2_conv1"
  top: "branch3_2_conv1"
}
layer {
  name: "branch3_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch3_2_conv1"
  top: "branch3_2_conv2"
  convolution_param {
    num_output: 58
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch3_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch3_2_conv2"
  top: "branch3_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch3_2_conv2_scale"
  bottom: "branch3_2_conv2"
  top: "branch3_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch3_2_conv3"
  type: "Convolution"
  bottom: "branch3_2_conv2"
  top: "branch3_2_conv3"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch3_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch3_2_conv3"
  top: "branch3_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch3_2_conv3_scale"
  bottom: "branch3_2_conv3"
  top: "branch3_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch3_2_conv3_relu"
  type: "ReLU"
  bottom: "branch3_2_conv3"
  top: "branch3_2_conv3"
}
layer {
  name: "concat3"
  type: "Concat"
  bottom: "branch3_1"
  bottom: "branch3_2_conv3"
  top: "concat3"
}
layer {
  name: "shuffle3"
  type: "ShuffleChannel"
  bottom: "concat3"
  top: "shuffle3"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 4 (same structure as units 2-3) ---
# Last basic unit of the 116-channel stage; its shuffle4 output feeds the
# next downsample unit (branch5_*).
layer {
  name: "slice4"
  type: "Slice"
  bottom: "shuffle3"
  top: "branch4_1"
  top: "branch4_2"
  slice_param {
    slice_point: 58
    axis: 1
  }
}
layer {
  name: "branch4_2_conv1"
  type: "Convolution"
  bottom: "branch4_2"
  top: "branch4_2_conv1"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch4_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch4_2_conv1"
  top: "branch4_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch4_2_conv1_scale"
  bottom: "branch4_2_conv1"
  top: "branch4_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch4_2_conv1_relu"
  type: "ReLU"
  bottom: "branch4_2_conv1"
  top: "branch4_2_conv1"
}
layer {
  name: "branch4_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch4_2_conv1"
  top: "branch4_2_conv2"
  convolution_param {
    num_output: 58
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch4_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch4_2_conv2"
  top: "branch4_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch4_2_conv2_scale"
  bottom: "branch4_2_conv2"
  top: "branch4_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch4_2_conv3"
  type: "Convolution"
  bottom: "branch4_2_conv2"
  top: "branch4_2_conv3"
  convolution_param {
    num_output: 58
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch4_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch4_2_conv3"
  top: "branch4_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch4_2_conv3_scale"
  bottom: "branch4_2_conv3"
  top: "branch4_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch4_2_conv3_relu"
  type: "ReLU"
  bottom: "branch4_2_conv3"
  top: "branch4_2_conv3"
}
layer {
  name: "concat4"
  type: "Concat"
  bottom: "branch4_1"
  bottom: "branch4_2_conv3"
  top: "concat4"
}
layer {
  name: "shuffle4"
  type: "ShuffleChannel"
  bottom: "concat4"
  top: "shuffle4"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 downsample (stride-2) unit 5 ---
# No channel split here: both branches take the full shuffle4 tensor.
# Shortcut branch (branch5_1): DW 3x3 stride 2 -> BN/Scale, then 1x1 conv
# -> BN/Scale -> ReLU. Main branch (branch5_2): 1x1 -> DW 3x3 stride 2 ->
# 1x1. Concat gives 116 + 116 = 232 channels (next stage width), then
# channel shuffle with group = 2.
layer {
  name: "branch5_1_conv1"
  type: "ConvolutionDepthwise"
  bottom: "shuffle4"
  top: "branch5_1_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 3
    # stride 2: spatial downsampling on the shortcut path.
    stride: 2
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch5_1_conv1_bn"
  type: "BatchNorm"
  bottom: "branch5_1_conv1"
  top: "branch5_1_conv1"
  # BN statistic blobs frozen to gradients (running averages).
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch5_1_conv1_scale"
  bottom: "branch5_1_conv1"
  top: "branch5_1_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch5_1_conv2"
  type: "Convolution"
  bottom: "branch5_1_conv1"
  top: "branch5_1_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch5_1_conv2_bn"
  type: "BatchNorm"
  bottom: "branch5_1_conv2"
  top: "branch5_1_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch5_1_conv2_scale"
  bottom: "branch5_1_conv2"
  top: "branch5_1_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch5_1_conv2_relu"
  type: "ReLU"
  bottom: "branch5_1_conv2"
  top: "branch5_1_conv2"
}
# Main branch: also reads the full shuffle4 input.
layer {
  name: "branch5_2_conv1"
  type: "Convolution"
  bottom: "shuffle4"
  top: "branch5_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch5_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch5_2_conv1"
  top: "branch5_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch5_2_conv1_scale"
  bottom: "branch5_2_conv1"
  top: "branch5_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch5_2_conv1_relu"
  type: "ReLU"
  bottom: "branch5_2_conv1"
  top: "branch5_2_conv1"
}
layer {
  name: "branch5_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch5_2_conv1"
  top: "branch5_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    # stride 2 here mirrors the shortcut branch so both outputs align spatially.
    stride: 2
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch5_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch5_2_conv2"
  top: "branch5_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
# No ReLU after the depthwise conv (ShuffleNet-V2 convention in this file).
layer {
  name: "branch5_2_conv2_scale"
  bottom: "branch5_2_conv2"
  top: "branch5_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch5_2_conv3"
  type: "Convolution"
  bottom: "branch5_2_conv2"
  top: "branch5_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch5_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch5_2_conv3"
  top: "branch5_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch5_2_conv3_scale"
  bottom: "branch5_2_conv3"
  top: "branch5_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch5_2_conv3_relu"
  type: "ReLU"
  bottom: "branch5_2_conv3"
  top: "branch5_2_conv3"
}
# 116 + 116 = 232 channels after concat.
layer {
  name: "concat5"
  type: "Concat"
  bottom: "branch5_1_conv2"
  bottom: "branch5_2_conv3"
  top: "concat5"
}
layer {
  name: "shuffle5"
  type: "ShuffleChannel"
  bottom: "concat5"
  top: "shuffle5"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 6 (232-channel stage; split 116 + 116) ---
# Same structure as units 2-4 but with 116 channels per branch.
layer {
  name: "slice6"
  type: "Slice"
  bottom: "shuffle5"
  top: "branch6_1"
  top: "branch6_2"
  slice_param {
    slice_point: 116
    axis: 1
  }
}
layer {
  name: "branch6_2_conv1"
  type: "Convolution"
  bottom: "branch6_2"
  top: "branch6_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch6_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch6_2_conv1"
  top: "branch6_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch6_2_conv1_scale"
  bottom: "branch6_2_conv1"
  top: "branch6_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch6_2_conv1_relu"
  type: "ReLU"
  bottom: "branch6_2_conv1"
  top: "branch6_2_conv1"
}
layer {
  name: "branch6_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch6_2_conv1"
  top: "branch6_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch6_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch6_2_conv2"
  top: "branch6_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch6_2_conv2_scale"
  bottom: "branch6_2_conv2"
  top: "branch6_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch6_2_conv3"
  type: "Convolution"
  bottom: "branch6_2_conv2"
  top: "branch6_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch6_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch6_2_conv3"
  top: "branch6_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch6_2_conv3_scale"
  bottom: "branch6_2_conv3"
  top: "branch6_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch6_2_conv3_relu"
  type: "ReLU"
  bottom: "branch6_2_conv3"
  top: "branch6_2_conv3"
}
layer {
  name: "concat6"
  type: "Concat"
  bottom: "branch6_1"
  bottom: "branch6_2_conv3"
  top: "concat6"
}
layer {
  name: "shuffle6"
  type: "ShuffleChannel"
  bottom: "concat6"
  top: "shuffle6"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 7 (same structure as unit 6) ---
layer {
  name: "slice7"
  type: "Slice"
  bottom: "shuffle6"
  top: "branch7_1"
  top: "branch7_2"
  slice_param {
    slice_point: 116
    axis: 1
  }
}
layer {
  name: "branch7_2_conv1"
  type: "Convolution"
  bottom: "branch7_2"
  top: "branch7_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch7_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch7_2_conv1"
  top: "branch7_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch7_2_conv1_scale"
  bottom: "branch7_2_conv1"
  top: "branch7_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch7_2_conv1_relu"
  type: "ReLU"
  bottom: "branch7_2_conv1"
  top: "branch7_2_conv1"
}
layer {
  name: "branch7_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch7_2_conv1"
  top: "branch7_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch7_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch7_2_conv2"
  top: "branch7_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch7_2_conv2_scale"
  bottom: "branch7_2_conv2"
  top: "branch7_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch7_2_conv3"
  type: "Convolution"
  bottom: "branch7_2_conv2"
  top: "branch7_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch7_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch7_2_conv3"
  top: "branch7_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch7_2_conv3_scale"
  bottom: "branch7_2_conv3"
  top: "branch7_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch7_2_conv3_relu"
  type: "ReLU"
  bottom: "branch7_2_conv3"
  top: "branch7_2_conv3"
}
layer {
  name: "concat7"
  type: "Concat"
  bottom: "branch7_1"
  bottom: "branch7_2_conv3"
  top: "concat7"
}
layer {
  name: "shuffle7"
  type: "ShuffleChannel"
  bottom: "concat7"
  top: "shuffle7"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 8 (same structure as units 6-7) ---
layer {
  name: "slice8"
  type: "Slice"
  bottom: "shuffle7"
  top: "branch8_1"
  top: "branch8_2"
  slice_param {
    slice_point: 116
    axis: 1
  }
}
layer {
  name: "branch8_2_conv1"
  type: "Convolution"
  bottom: "branch8_2"
  top: "branch8_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch8_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch8_2_conv1"
  top: "branch8_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch8_2_conv1_scale"
  bottom: "branch8_2_conv1"
  top: "branch8_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch8_2_conv1_relu"
  type: "ReLU"
  bottom: "branch8_2_conv1"
  top: "branch8_2_conv1"
}
layer {
  name: "branch8_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch8_2_conv1"
  top: "branch8_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch8_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch8_2_conv2"
  top: "branch8_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch8_2_conv2_scale"
  bottom: "branch8_2_conv2"
  top: "branch8_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch8_2_conv3"
  type: "Convolution"
  bottom: "branch8_2_conv2"
  top: "branch8_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch8_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch8_2_conv3"
  top: "branch8_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch8_2_conv3_scale"
  bottom: "branch8_2_conv3"
  top: "branch8_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch8_2_conv3_relu"
  type: "ReLU"
  bottom: "branch8_2_conv3"
  top: "branch8_2_conv3"
}
layer {
  name: "concat8"
  type: "Concat"
  bottom: "branch8_1"
  bottom: "branch8_2_conv3"
  top: "concat8"
}
layer {
  name: "shuffle8"
  type: "ShuffleChannel"
  bottom: "concat8"
  top: "shuffle8"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 9 (same structure as units 6-8) ---
layer {
  name: "slice9"
  type: "Slice"
  bottom: "shuffle8"
  top: "branch9_1"
  top: "branch9_2"
  slice_param {
    slice_point: 116
    axis: 1
  }
}
layer {
  name: "branch9_2_conv1"
  type: "Convolution"
  bottom: "branch9_2"
  top: "branch9_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch9_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch9_2_conv1"
  top: "branch9_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch9_2_conv1_scale"
  bottom: "branch9_2_conv1"
  top: "branch9_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch9_2_conv1_relu"
  type: "ReLU"
  bottom: "branch9_2_conv1"
  top: "branch9_2_conv1"
}
layer {
  name: "branch9_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch9_2_conv1"
  top: "branch9_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch9_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch9_2_conv2"
  top: "branch9_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch9_2_conv2_scale"
  bottom: "branch9_2_conv2"
  top: "branch9_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch9_2_conv3"
  type: "Convolution"
  bottom: "branch9_2_conv2"
  top: "branch9_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch9_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch9_2_conv3"
  top: "branch9_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch9_2_conv3_scale"
  bottom: "branch9_2_conv3"
  top: "branch9_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch9_2_conv3_relu"
  type: "ReLU"
  bottom: "branch9_2_conv3"
  top: "branch9_2_conv3"
}
layer {
  name: "concat9"
  type: "Concat"
  bottom: "branch9_1"
  bottom: "branch9_2_conv3"
  top: "concat9"
}
layer {
  name: "shuffle9"
  type: "ShuffleChannel"
  bottom: "concat9"
  top: "shuffle9"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 10 (same structure as units 6-9) ---
layer {
  name: "slice10"
  type: "Slice"
  bottom: "shuffle9"
  top: "branch10_1"
  top: "branch10_2"
  slice_param {
    slice_point: 116
    axis: 1
  }
}
layer {
  name: "branch10_2_conv1"
  type: "Convolution"
  bottom: "branch10_2"
  top: "branch10_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch10_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch10_2_conv1"
  top: "branch10_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch10_2_conv1_scale"
  bottom: "branch10_2_conv1"
  top: "branch10_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch10_2_conv1_relu"
  type: "ReLU"
  bottom: "branch10_2_conv1"
  top: "branch10_2_conv1"
}
layer {
  name: "branch10_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch10_2_conv1"
  top: "branch10_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch10_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch10_2_conv2"
  top: "branch10_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch10_2_conv2_scale"
  bottom: "branch10_2_conv2"
  top: "branch10_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch10_2_conv3"
  type: "Convolution"
  bottom: "branch10_2_conv2"
  top: "branch10_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch10_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch10_2_conv3"
  top: "branch10_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch10_2_conv3_scale"
  bottom: "branch10_2_conv3"
  top: "branch10_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch10_2_conv3_relu"
  type: "ReLU"
  bottom: "branch10_2_conv3"
  top: "branch10_2_conv3"
}
layer {
  name: "concat10"
  type: "Concat"
  bottom: "branch10_1"
  bottom: "branch10_2_conv3"
  top: "concat10"
}
layer {
  name: "shuffle10"
  type: "ShuffleChannel"
  bottom: "concat10"
  top: "shuffle10"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 11 (same structure as units 6-10) ---
layer {
  name: "slice11"
  type: "Slice"
  bottom: "shuffle10"
  top: "branch11_1"
  top: "branch11_2"
  slice_param {
    slice_point: 116
    axis: 1
  }
}
layer {
  name: "branch11_2_conv1"
  type: "Convolution"
  bottom: "branch11_2"
  top: "branch11_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch11_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch11_2_conv1"
  top: "branch11_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch11_2_conv1_scale"
  bottom: "branch11_2_conv1"
  top: "branch11_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch11_2_conv1_relu"
  type: "ReLU"
  bottom: "branch11_2_conv1"
  top: "branch11_2_conv1"
}
layer {
  name: "branch11_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch11_2_conv1"
  top: "branch11_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch11_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch11_2_conv2"
  top: "branch11_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch11_2_conv2_scale"
  bottom: "branch11_2_conv2"
  top: "branch11_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch11_2_conv3"
  type: "Convolution"
  bottom: "branch11_2_conv2"
  top: "branch11_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch11_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch11_2_conv3"
  top: "branch11_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch11_2_conv3_scale"
  bottom: "branch11_2_conv3"
  top: "branch11_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch11_2_conv3_relu"
  type: "ReLU"
  bottom: "branch11_2_conv3"
  top: "branch11_2_conv3"
}
layer {
  name: "concat11"
  type: "Concat"
  bottom: "branch11_1"
  bottom: "branch11_2_conv3"
  top: "concat11"
}
layer {
  name: "shuffle11"
  type: "ShuffleChannel"
  bottom: "concat11"
  top: "shuffle11"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 12 (head; BN/Scale of branch12_2_conv3 and the
# unit's Concat/Shuffle continue below this excerpt) ---
layer {
  name: "slice12"
  type: "Slice"
  bottom: "shuffle11"
  top: "branch12_1"
  top: "branch12_2"
  slice_param {
    slice_point: 116
    axis: 1
  }
}
layer {
  name: "branch12_2_conv1"
  type: "Convolution"
  bottom: "branch12_2"
  top: "branch12_2_conv1"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch12_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch12_2_conv1"
  top: "branch12_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch12_2_conv1_scale"
  bottom: "branch12_2_conv1"
  top: "branch12_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch12_2_conv1_relu"
  type: "ReLU"
  bottom: "branch12_2_conv1"
  top: "branch12_2_conv1"
}
layer {
  name: "branch12_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch12_2_conv1"
  top: "branch12_2_conv2"
  convolution_param {
    num_output: 116
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch12_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch12_2_conv2"
  top: "branch12_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch12_2_conv2_scale"
  bottom: "branch12_2_conv2"
  top: "branch12_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch12_2_conv3"
  type: "Convolution"
  bottom: "branch12_2_conv2"
  top: "branch12_2_conv3"
  convolution_param {
    num_output: 116
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch12_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch12_2_conv3"
  top: "branch12_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch12_2_conv3_scale"
  bottom: "branch12_2_conv3"
  top: "branch12_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch12_2_conv3_relu"
  type: "ReLU"
  bottom: "branch12_2_conv3"
  top: "branch12_2_conv3"
}
layer {
  name: "concat12"
  type: "Concat"
  bottom: "branch12_1"
  bottom: "branch12_2_conv3"
  top: "concat12"
}
layer {
  name: "shuffle12"
  type: "ShuffleChannel"
  bottom: "concat12"
  top: "shuffle12"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 spatial-downsampling unit 13 (entry to stage 4) ---
# No channel split here: BOTH branches consume the full shuffle12 output.
# Branch 1: 3x3 depthwise conv (stride 2, halves H/W) -> BN/Scale,
# then 1x1 conv -> BN/Scale/ReLU. Output: 232 channels.
layer {
  name: "branch13_1_conv1"
  type: "ConvolutionDepthwise"
  bottom: "shuffle12"
  top: "branch13_1_conv1"
  convolution_param {
    num_output: 232
    kernel_size: 3
    stride: 2
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch13_1_conv1_bn"
  type: "BatchNorm"
  bottom: "branch13_1_conv1"
  top: "branch13_1_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch13_1_conv1_scale"
  bottom: "branch13_1_conv1"
  top: "branch13_1_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch13_1_conv2"
  type: "Convolution"
  bottom: "branch13_1_conv1"
  top: "branch13_1_conv2"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch13_1_conv2_bn"
  type: "BatchNorm"
  bottom: "branch13_1_conv2"
  top: "branch13_1_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch13_1_conv2_scale"
  bottom: "branch13_1_conv2"
  top: "branch13_1_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch13_1_conv2_relu"
  type: "ReLU"
  bottom: "branch13_1_conv2"
  top: "branch13_1_conv2"
}
# Branch 2: 1x1 conv -> 3x3 depthwise (stride 2) -> 1x1 conv, each with
# BN/Scale; ReLU only after the pointwise convs. Output: 232 channels.
layer {
  name: "branch13_2_conv1"
  type: "Convolution"
  bottom: "shuffle12"
  top: "branch13_2_conv1"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch13_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch13_2_conv1"
  top: "branch13_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch13_2_conv1_scale"
  bottom: "branch13_2_conv1"
  top: "branch13_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch13_2_conv1_relu"
  type: "ReLU"
  bottom: "branch13_2_conv1"
  top: "branch13_2_conv1"
}
layer {
  name: "branch13_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch13_2_conv1"
  top: "branch13_2_conv2"
  convolution_param {
    num_output: 232
    kernel_size: 3
    stride: 2
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch13_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch13_2_conv2"
  top: "branch13_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch13_2_conv2_scale"
  bottom: "branch13_2_conv2"
  top: "branch13_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch13_2_conv3"
  type: "Convolution"
  bottom: "branch13_2_conv2"
  top: "branch13_2_conv3"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch13_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch13_2_conv3"
  top: "branch13_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch13_2_conv3_scale"
  bottom: "branch13_2_conv3"
  top: "branch13_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch13_2_conv3_relu"
  type: "ReLU"
  bottom: "branch13_2_conv3"
  top: "branch13_2_conv3"
}
# Concat doubles the width (232 + 232 = 464 channels) while H/W are halved,
# then shuffle mixes the two branch groups.
layer {
  name: "concat13"
  type: "Concat"
  bottom: "branch13_1_conv2"
  bottom: "branch13_2_conv3"
  top: "concat13"
}
layer {
  name: "shuffle13"
  type: "ShuffleChannel"
  bottom: "concat13"
  top: "shuffle13"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 14 (stage 4, stride 1, 232 ch per branch) ---
# Channel split: identity branch (branch14_1) / transform branch (branch14_2).
layer {
  name: "slice14"
  type: "Slice"
  bottom: "shuffle13"
  top: "branch14_1"
  top: "branch14_2"
  slice_param {
    slice_point: 232
    axis: 1
  }
}
# Transform branch: 1x1 conv -> BN/Scale/ReLU.
layer {
  name: "branch14_2_conv1"
  type: "Convolution"
  bottom: "branch14_2"
  top: "branch14_2_conv1"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch14_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch14_2_conv1"
  top: "branch14_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch14_2_conv1_scale"
  bottom: "branch14_2_conv1"
  top: "branch14_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch14_2_conv1_relu"
  type: "ReLU"
  bottom: "branch14_2_conv1"
  top: "branch14_2_conv1"
}
# 3x3 depthwise conv, stride 1, pad 1 (spatial size preserved); no ReLU after.
layer {
  name: "branch14_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch14_2_conv1"
  top: "branch14_2_conv2"
  convolution_param {
    num_output: 232
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch14_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch14_2_conv2"
  top: "branch14_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch14_2_conv2_scale"
  bottom: "branch14_2_conv2"
  top: "branch14_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
# Closing 1x1 pointwise conv -> BN/Scale/ReLU.
layer {
  name: "branch14_2_conv3"
  type: "Convolution"
  bottom: "branch14_2_conv2"
  top: "branch14_2_conv3"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch14_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch14_2_conv3"
  top: "branch14_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch14_2_conv3_scale"
  bottom: "branch14_2_conv3"
  top: "branch14_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch14_2_conv3_relu"
  type: "ReLU"
  bottom: "branch14_2_conv3"
  top: "branch14_2_conv3"
}
# Concat identity + transformed branch, then channel shuffle (group 2).
layer {
  name: "concat14"
  type: "Concat"
  bottom: "branch14_1"
  bottom: "branch14_2_conv3"
  top: "concat14"
}
layer {
  name: "shuffle14"
  type: "ShuffleChannel"
  bottom: "concat14"
  top: "shuffle14"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 15 (stage 4, stride 1, 232 ch per branch) ---
# Same structure as unit 14: split -> (1x1 conv, 3x3 dw conv, 1x1 conv)
# -> concat -> channel shuffle.
layer {
  name: "slice15"
  type: "Slice"
  bottom: "shuffle14"
  top: "branch15_1"
  top: "branch15_2"
  slice_param {
    slice_point: 232
    axis: 1
  }
}
layer {
  name: "branch15_2_conv1"
  type: "Convolution"
  bottom: "branch15_2"
  top: "branch15_2_conv1"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch15_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch15_2_conv1"
  top: "branch15_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch15_2_conv1_scale"
  bottom: "branch15_2_conv1"
  top: "branch15_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch15_2_conv1_relu"
  type: "ReLU"
  bottom: "branch15_2_conv1"
  top: "branch15_2_conv1"
}
# 3x3 depthwise conv; BN/Scale but no ReLU afterwards.
layer {
  name: "branch15_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch15_2_conv1"
  top: "branch15_2_conv2"
  convolution_param {
    num_output: 232
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch15_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch15_2_conv2"
  top: "branch15_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch15_2_conv2_scale"
  bottom: "branch15_2_conv2"
  top: "branch15_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch15_2_conv3"
  type: "Convolution"
  bottom: "branch15_2_conv2"
  top: "branch15_2_conv3"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch15_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch15_2_conv3"
  top: "branch15_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch15_2_conv3_scale"
  bottom: "branch15_2_conv3"
  top: "branch15_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch15_2_conv3_relu"
  type: "ReLU"
  bottom: "branch15_2_conv3"
  top: "branch15_2_conv3"
}
layer {
  name: "concat15"
  type: "Concat"
  bottom: "branch15_1"
  bottom: "branch15_2_conv3"
  top: "concat15"
}
layer {
  name: "shuffle15"
  type: "ShuffleChannel"
  bottom: "concat15"
  top: "shuffle15"
  shuffle_channel_param {
    group: 2
  }
}
# --- ShuffleNet-V2 basic unit 16 (stage 4, stride 1, last shuffle unit) ---
# Same structure as units 14/15: split -> (1x1, 3x3 dw, 1x1) -> concat
# -> channel shuffle. Its output feeds the conv5 head below.
layer {
  name: "slice16"
  type: "Slice"
  bottom: "shuffle15"
  top: "branch16_1"
  top: "branch16_2"
  slice_param {
    slice_point: 232
    axis: 1
  }
}
layer {
  name: "branch16_2_conv1"
  type: "Convolution"
  bottom: "branch16_2"
  top: "branch16_2_conv1"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch16_2_conv1_bn"
  type: "BatchNorm"
  bottom: "branch16_2_conv1"
  top: "branch16_2_conv1"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch16_2_conv1_scale"
  bottom: "branch16_2_conv1"
  top: "branch16_2_conv1"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch16_2_conv1_relu"
  type: "ReLU"
  bottom: "branch16_2_conv1"
  top: "branch16_2_conv1"
}
# 3x3 depthwise conv; BN/Scale but no ReLU afterwards.
layer {
  name: "branch16_2_conv2"
  type: "ConvolutionDepthwise"
  bottom: "branch16_2_conv1"
  top: "branch16_2_conv2"
  convolution_param {
    num_output: 232
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch16_2_conv2_bn"
  type: "BatchNorm"
  bottom: "branch16_2_conv2"
  top: "branch16_2_conv2"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch16_2_conv2_scale"
  bottom: "branch16_2_conv2"
  top: "branch16_2_conv2"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch16_2_conv3"
  type: "Convolution"
  bottom: "branch16_2_conv2"
  top: "branch16_2_conv3"
  convolution_param {
    num_output: 232
    kernel_size: 1
    stride: 1
    pad: 0
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
layer {
  name: "branch16_2_conv3_bn"
  type: "BatchNorm"
  bottom: "branch16_2_conv3"
  top: "branch16_2_conv3"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "branch16_2_conv3_scale"
  bottom: "branch16_2_conv3"
  top: "branch16_2_conv3"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "branch16_2_conv3_relu"
  type: "ReLU"
  bottom: "branch16_2_conv3"
  top: "branch16_2_conv3"
}
layer {
  name: "concat16"
  type: "Concat"
  bottom: "branch16_1"
  bottom: "branch16_2_conv3"
  top: "concat16"
}
layer {
  name: "shuffle16"
  type: "ShuffleChannel"
  bottom: "concat16"
  top: "shuffle16"
  shuffle_channel_param {
    group: 2
  }
}
# --- Head: conv5, a 1x1 conv expanding 464 -> 1024 channels, with the
# standard BN/Scale/ReLU stack. ---
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "shuffle16"
  top: "conv5"
  convolution_param {
    num_output: 1024
    pad: 0
    kernel_size: 1
    stride: 1
    bias_term: false
    weight_filler {
      type: "msra"
    }
  }
}
# BatchNorm internal blobs frozen for SGD (lr_mult 0), as elsewhere.
layer {
  name: "conv5_bn"
  type: "BatchNorm"
  bottom: "conv5"
  top: "conv5"
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
  param {
    lr_mult: 0
    decay_mult: 0
  }
}
layer {
  name: "conv5_scale"
  bottom: "conv5"
  top: "conv5"
  type: "Scale"
  scale_param {
    filler {
      value: 1
    }
    bias_term: true
    bias_filler {
      value: 0
    }
  }
}
layer {
  name: "conv5_relu"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
# Global average pooling: collapses each 1024-channel feature map to 1x1,
# so the following 1x1 convs behave like fully-connected layers.
layer {
  name: "pool_ave"
  type: "Pooling"
  bottom: "conv5"
  top: "pool_ave"
  pooling_param {
    global_pooling : true
    pool: AVE
  }
}
# fc1000: 1x1 conv on the 1x1 pooled map — equivalent to a 1000-unit
# fully-connected layer. Unlike the backbone convs, this layer learns a bias
# and is trainable (weight lr_mult 1, bias lr_mult 2 with no weight decay,
# the usual Caffe convention for biases).
layer {
  name: "fc1000"
  type: "Convolution"
  bottom: "pool_ave"
  top: "fc1000"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 1000
    kernel_size: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# fc4: regression head — a second FC-style 1x1 conv reducing 1000 -> 4
# outputs (one per regression target; NOTE(review): presumably the 4 values
# of the label, e.g. a bounding box — confirm against the data layer, which
# is outside this excerpt).
layer {
  name: "fc4"
  type: "Convolution"
  bottom: "fc1000"
  top: "fc4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 4
    kernel_size: 1
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
# EuclideanLoss (sum-of-squares / L2 loss) between the 4 predicted values
# and the 4-element "label" blob — this is what turns the classification
# backbone into a regression network.
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "fc4"  
  bottom: "label" 
  top: "loss" 
}

結語

如果您有修改意見或問題,歡迎留言或者通過郵箱和我聯繫。
手打很辛苦,如果我的文章對您有幫助,轉載請註明出處。

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章