识别模型采用 resnet18,模型大小大约 40 MB 左右。
代码如下
import torch
import torch.nn as nn
from torchvision import models
class SVHN_Model1(nn.Module):
    """Multi-digit SVHN classifier: a pretrained resnet18 backbone with five
    parallel classification heads, one per digit position.

    Each head outputs 11 logits (digits 0-9 plus one "no digit" class).
    Input: image tensor of shape (batch, 3, H, W).
    Returns: tuple of five (batch, 11) logit tensors.
    """

    def __init__(self):
        # NOTE: original code called super().__init__() outside __init__,
        # inherited from nn.Model (nonexistent), and referenced the
        # misspelled name `model_cov` -- all fixed here.
        super(SVHN_Model1, self).__init__()
        backbone = models.resnet18(pretrained=True)
        # Drop the final fully-connected layer; keep conv stack + avgpool,
        # which yields a (batch, 512, 1, 1) feature map for resnet18.
        self.cnn = nn.Sequential(*list(backbone.children())[:-1])
        # Five independent heads, one per digit position.
        self.fc1 = nn.Linear(512, 11)
        self.fc2 = nn.Linear(512, 11)
        self.fc3 = nn.Linear(512, 11)
        self.fc4 = nn.Linear(512, 11)
        self.fc5 = nn.Linear(512, 11)

    def forward(self, img):
        feat = self.cnn(img)
        # Flatten (batch, 512, 1, 1) -> (batch, 512) for the linear heads.
        feat = feat.view(feat.shape[0], -1)
        c1 = self.fc1(feat)
        c2 = self.fc2(feat)
        c3 = self.fc3(feat)
        c4 = self.fc4(feat)
        c5 = self.fc5(feat)
        return c1, c2, c3, c4, c5
# Smoke test: run a dummy batch through the net to confirm the flattened
# feature size matches the Linear heads' in_features (512 for resnet18).
# Fixes vs original: `troch.autograde`/`Variabel` were typos for
# torch.autograd.Variable, which is deprecated since PyTorch 0.4 --
# a plain tensor is all that is needed now.
if __name__ == "__main__":
    net = SVHN_Model1()
    dummy = torch.randn(40, 3, 64, 128)  # (batch, channels, height, width)
    outputs = net(dummy)
    print([o.shape for o in outputs])  # expect five (40, 11) tensors
- 对 `*list` 解包用法的理解:
def add(a, b):
    """Return the sum of the two arguments."""
    return a + b


positional_args = [1, 2]
keyword_args = {"a": 3, "b": 4}
add(*positional_args)  # -> 3: * unpacks the list as positional arguments
add(**keyword_args)    # -> 7: ** unpacks the dict as keyword arguments
可以看出:`*` 把列表按位置解包成位置参数,`**` 把字典解包成关键字参数(键对应形参名,值对应实参值),而不是单纯"剥离 value"。
比赛中 model 的尝试:
- resnet18 加上 ImageNet 的预训练参数,结果还是不错的,大概 20 个 epoch 就训练得差不多了。
- resnet34 试了一下,结果和 resnet18 差不多。
- 试了其他结构的模型,发现只要是没有预训练参数的模型,训练都比较缓慢,没有取得好的结果;可能是我的训练策略有问题,learning rate 可能需要调整。