識別模型採用 ResNet18,模型大概 40MB 左右。
代碼如下:
import torch.nn as nn
class SVHN_Model1(nn.Module):
    """SVHN multi-digit classifier.

    A pretrained ResNet18 trunk (final FC layer removed) feeds five
    parallel 11-way linear heads — one per character position.
    Presumably the 11 classes are digits 0-9 plus a "blank"/no-digit
    class — TODO confirm against the label encoding used in training.
    """

    def __init__(self):
        # NOTE: super().__init__() must run inside __init__, before any
        # submodules are assigned, or nn.Module registration breaks.
        super(SVHN_Model1, self).__init__()
        # Local import keeps the module importable when torchvision is
        # absent; it is only needed at model-construction time.
        from torchvision import models

        model_conv = models.resnet18(pretrained=True)
        # Drop the last child (the 1000-way ImageNet FC layer); what
        # remains is the conv trunk + average pool, whose flattened
        # output is 512 features per sample.
        model_conv = nn.Sequential(*list(model_conv.children())[:-1])
        self.cnn = model_conv
        # One head per digit position, 11 classes each.
        self.fc1 = nn.Linear(512, 11)
        self.fc2 = nn.Linear(512, 11)
        self.fc3 = nn.Linear(512, 11)
        self.fc4 = nn.Linear(512, 11)
        self.fc5 = nn.Linear(512, 11)

    def forward(self, img):
        """Return a 5-tuple of (N, 11) logits, one per digit position.

        img: image batch of shape (N, 3, H, W).
        """
        feat = self.cnn(img)
        # Flatten (N, 512, 1, 1) -> (N, 512) for the linear heads.
        # (Debug print of feat.shape removed — it ran on every batch.)
        feat = feat.view(feat.shape[0], -1)
        c1 = self.fc1(feat)
        c2 = self.fc2(feat)
        c3 = self.fc3(feat)
        c4 = self.fc4(feat)
        c5 = self.fc5(feat)
        return c1, c2, c3, c4, c5
# Smoke test: push one dummy batch through the network to confirm that
# the flattened CNN feature size matches the linear heads' input size.
if __name__ == "__main__":
    import torch

    # torch.autograd.Variable is deprecated — plain tensors carry
    # autograd state since PyTorch 0.4, so no wrapper is needed.
    dummy_batch = torch.randn(40, 3, 64, 128)  # (N, C, H, W)
    net = SVHN_Model1()
    # Use the printed/observed output shapes to adjust the FC input
    # size so it matches the flattened conv features.
    net(dummy_batch)
- `*` / `**` 解包操作的理解
# Demonstration of argument unpacking: `*` spreads a sequence into
# positional arguments, `**` spreads a mapping into keyword arguments.
def add(a, b):
    """Return the sum of the two arguments."""
    return a + b


positional_args = [1, 2]
keyword_args = {"a": 3, "b": 4}
add(*positional_args)   # -> 3: expands to add(1, 2)
add(**keyword_args)     # -> 7: expands to add(a=3, b=4)
可以看出 `*` 與 `**` 解包操作對列表和字典的作用:`*` 把列表元素展開爲位置參數,`**` 把字典的鍵值對展開爲關鍵字參數。
比賽中 model 的嘗試
- ResNet18 加上 ImageNet 的預訓練參數,結果還是不錯的,大概 20 個 epoch 就訓練得差不多了
- ResNet34 試了一下,結果和 ResNet18 差不多
- 試了其他結構的模型,發現只要是沒有預訓練參數的模型,訓練都比較緩慢,沒有取得好的結果;可能是我的訓練策略有問題,也可能 learning rate 需要調整。