你一定有這樣一種感受:當網絡模型層數較少的時候,手動編寫配置文件還可以接受;當網絡層數多了之後,就會越寫越亂。現在介紹用Python生成配置文件,不僅編寫方便,而且閱讀起來也比較舒服。
以mnist的LeNet爲例,直接上代碼:
#!/usr/bin/python
# encoding: utf-8
'''
Created on 2017.06.12
Example: generate LeNet with Python, equivalent to writing the network
definition in a prototxt file by hand.
@Author: DR Xiao
'''
# The caffe module must be on the Python path; prepend it here so that
# `import caffe` below resolves against the local build.
import sys

caffe_root = 'pycaffe'  # Run this file from {caffe_root}/examples, otherwise adjust this path.
sys.path.insert(0, caffe_root)

import caffe
from caffe import layers as L
from caffe import params as P
class LeNet(object):
    """Builds a LeNet network spec, equivalent to a hand-written prototxt.

    Holds the paths of the train/test LMDB data sources and the number of
    output classes, and emits the corresponding NetParameter proto.
    """

    def __init__(self, lmdb_train, lmdb_test, num_output):
        # lmdb_train / lmdb_test: paths of the LMDB data sources.
        # num_output: number of classes for the final classifier layer.
        self.train_data = lmdb_train
        self.test_data = lmdb_test
        self.class_num = num_output

    def lenet_proto(self, batch_size, phase='train'):
        """Return the LeNet NetParameter proto.

        batch_size: mini-batch size of the Data layer.
        phase: 'train' reads from the training LMDB; any other value reads
               from the test LMDB. Defaults to 'train', preserving the
               original behavior (which always used the training source
               and never touched self.test_data).
        """
        source = self.train_data if phase == 'train' else self.test_data
        n = caffe.NetSpec()
        # scale=0.00390625 is 1/256: normalizes 8-bit pixels into [0, 1).
        n.data, n.label = L.Data(source=source,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(scale=0.00390625, mirror=False))
        # Conv 5x5/1 -> 20 maps, then 2x2/2 max-pool.
        n.conv1 = L.Convolution(n.data,
                                kernel_size=5,
                                num_output=20,
                                stride=1,
                                weight_filler=dict(type='xavier'),
                                bias_filler=dict(type='constant'))
        n.pool1 = L.Pooling(n.conv1,
                            pool=P.Pooling.MAX,
                            kernel_size=2,
                            stride=2)
        # Conv 5x5/1 -> 50 maps, then 2x2/2 max-pool.
        n.conv2 = L.Convolution(n.pool1,
                                kernel_size=5,
                                num_output=50,
                                stride=1,
                                weight_filler=dict(type='xavier'),
                                bias_filler=dict(type='constant'))
        n.pool2 = L.Pooling(n.conv2,
                            pool=P.Pooling.MAX,
                            kernel_size=2,
                            stride=2)
        # Fully connected 500 -> ReLU (in place) -> classifier head.
        n.ip1 = L.InnerProduct(n.pool2,
                               num_output=500,
                               weight_filler=dict(type='xavier'),
                               bias_filler=dict(type='constant'))
        n.relu1 = L.ReLU(n.ip1,
                         in_place=True)
        n.ip2 = L.InnerProduct(n.relu1,
                               num_output=self.class_num,
                               weight_filler=dict(type='xavier'),
                               bias_filler=dict(type='constant'))
        n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
        return n.to_proto()
if __name__ == '__main__':
    # Demo: build LeNet for 10 classes and print the generated prototxt.
    # print(...) with a single argument works under both Python 2 and 3,
    # unlike the original `print expr` statement.
    net = LeNet('train_path', 'test_path', 10)
    print(net.lenet_proto(128))