PyTorch Implementation: Improving LeNet with AlexNet, VGG, and NiN


1. Improving LeNet based on the AlexNet structure

AlexNet architecture:
[Figure: AlexNet network structure]
① Compared with the relatively small LeNet, AlexNet consists of 8 layers: 5 convolutional layers, 2 fully connected hidden layers, and 1 fully connected output layer.
② AlexNet replaces the sigmoid activation function with the simpler ReLU activation function.
③ AlexNet uses dropout to control the model complexity of the fully connected layers.
④ AlexNet introduces extensive image augmentation, such as flipping, cropping, and color changes, to enlarge the dataset and mitigate overfitting (see the augmentation sketch after this list).
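As a minimal sketch of point ④ (the augmentation parameters here are illustrative assumptions, not AlexNet's exact settings), a torchvision pipeline combining flipping, cropping, and color changes could look like this:

import torchvision.transforms as transforms

# Augmentation sketch: random flip, random crop back to 32x32, and color jitter,
# followed by tensor conversion. Parameter values are arbitrary examples.
train_augmentation = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomResizedCrop(32, scale=(0.8, 1.0)),
    transforms.ColorJitter(brightness=0.2, contrast=0.2),
    transforms.ToTensor()
])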

Improving on LeNet: first, the images used for training and testing in the Fashion-MNIST dataset are resized to 32×32.

import torchvision
import torchvision.transforms as transforms

transform = transforms.Compose(
    [transforms.Resize(32),
     transforms.ToTensor()])

mnist_train = torchvision.datasets.FashionMNIST(
    root="./DataSets/FashionMNIST",
    train=True,
    download=True,
    transform=transform)

mnist_test = torchvision.datasets.FashionMNIST(
    root="./DataSets/FashionMNIST",
    train=False,
    download=True,
    transform=transform)
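To feed these datasets to the training loop they still need to be wrapped into batch iterators; a minimal sketch with torch.utils.data.DataLoader (batch_size matches the full code below, num_workers is an arbitrary choice) is:

from torch.utils.data import DataLoader

# Wrap the resized datasets into batch iterators (shuffle only the training set).
train_iter = DataLoader(mnist_train, batch_size=128, shuffle=True, num_workers=4)
test_iter = DataLoader(mnist_test, batch_size=128, shuffle=False, num_workers=4)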

Full code:

import os
import torch
import torch.nn as nn
import sys
import time
import d2lzh_pytorch as d2l

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(torch.__version__)
print(device)
start = time.time()
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 6, 5), # in_channels, out_channels, kernel_size
            nn.ReLU(),
            nn.MaxPool2d(2, 2), # kernel_size, stride
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(16, 120, 5),
            nn.ReLU()
        )
        self.fc = nn.Sequential(
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(84,10)
        )

    def forward(self, img):
        feature = self.conv(img)
        output = self.fc(feature.view(img.shape[0], -1))
        return output
net = LeNet()
print(net)
batch_size = 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size, resize=32)  # resize to 32x32 so the 5x5 conv stack fits

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode: this disables dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train()  # switch back to training mode
            else:  # custom model (not used after section 3.13; GPU not considered)
                if('is_training' in net.__code__.co_varnames):  # if the model takes an is_training argument
                    # set is_training to False
                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n
lr, num_epochs = 0.001, 10
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
#train
net = net.to(device)
print("training on ", device)
loss = torch.nn.CrossEntropyLoss()
batch_count = 0
total_time = 0
for epoch in range(num_epochs):
    train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
    for X, y in train_iter:
        X = X.to(device)
        y = y.to(device)
        y_hat = net(X)
        l = loss(y_hat, y)
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
        train_l_sum += l.cpu().item()
        train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
        n += y.shape[0]
        batch_count += 1
    test_acc = evaluate_accuracy(test_iter, net)
    total_time += round(time.time()-start,2)
    print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
            % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))

end = time.time()
print('total time:%.2f sec'%(total_time))
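To sanity-check the AlexNet-style stack, a small follow-up sketch (the dummy tensor is illustrative) can print the feature shape after the convolutional part; with three 5×5 convolutions and two 2×2 poolings, a 1×32×32 image shrinks as 32 → 28 → 14 → 10 → 5 → 1, matching the 120-unit input of the first linear layer:

# Shape check: expects torch.Size([1, 120, 1, 1])
dummy = torch.randn(1, 1, 32, 32).to(device)
with torch.no_grad():
    print(net.conv(dummy).shape)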

2. Improving LeNet based on the VGG structure

The building pattern of a VGG block is: several consecutive convolutional layers with padding 1 and a 3×3 window, followed by a max-pooling layer with stride 2 and a 2×2 window. The convolutional layers keep the input height and width unchanged, while the pooling layer halves them. A vgg_block function can implement this basic VGG block, taking the number of convolutional layers and the numbers of input and output channels as arguments.
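A minimal sketch of such a vgg_block helper (following the d2l convention with 3×3 convolutions, padding 1, and 2×2 max pooling; note that the LeNet variant in the full code below stacks 3×3 convolutions without padding instead of calling this function):

import torch.nn as nn

def vgg_block(num_convs, in_channels, out_channels):
    # num_convs 3x3 convolutions with padding 1 (height/width preserved),
    # followed by a 2x2 max pooling with stride 2 (height/width halved).
    layers = []
    for _ in range(num_convs):
        layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
        layers.append(nn.ReLU())
        in_channels = out_channels
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)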
As with the AlexNet-based improvement, the input images are first resized to 32×32.

transform = transforms.Compose(
    [transforms.Resize(32),
     transforms.ToTensor()])

mnist_train = torchvision.datasets.FashionMNIST(
    root="./DataSets/FashionMNIST",
    train=True,
    download=True,
    transform=transform)

mnist_test = torchvision.datasets.FashionMNIST(
    root="./DataSets/FashionMNIST",
    train=False,
    download=True,
    transform=transform)

Full code:

import os
import torch
import torch.nn as nn
import sys
import time
import d2lzh_pytorch as d2l

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(torch.__version__)
print(device)
start = time.time()
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 6, 3), # in_channels, out_channels, kernel_size
            nn.ReLU(),
            nn.Conv2d(6, 6, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2), # kernel_size, stride
            nn.Conv2d(6, 16, 3),
            nn.ReLU(),
            nn.Conv2d(16, 16, 3),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(16, 120, 3),
            nn.ReLU(),
            nn.Conv2d(120, 120, 3),
            nn.ReLU()
        )
        self.fc = nn.Sequential(
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(84,10)
        )

    def forward(self, img):
        feature = self.conv(img)
        output = self.fc(feature.view(img.shape[0], -1))
        return output
net = LeNet()
print(net)
batch_size = 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size, resize=32)  # resize to 32x32 so the stacked 3x3 convs fit

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode: this disables dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train()  # switch back to training mode
            else:  # custom model (not used after section 3.13; GPU not considered)
                if('is_training' in net.__code__.co_varnames):  # if the model takes an is_training argument
                    # set is_training to False
                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n
lr, num_epochs = 0.001, 10
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
#train
net = net.to(device)
print("training on ", device)
loss = torch.nn.CrossEntropyLoss()
batch_count = 0
total_time = 0
for epoch in range(num_epochs):
    train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
    for X, y in train_iter:
        X = X.to(device)
        y = y.to(device)
        y_hat = net(X)
        l = loss(y_hat, y)
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
        train_l_sum += l.cpu().item()
        train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
        n += y.shape[0]
        batch_count += 1
    test_acc = evaluate_accuracy(test_iter, net)
    total_time += round(time.time()-start,2)
    print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
            % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))

end = time.time()
print('total time:%.2f sec'%(total_time))

For a given receptive field (the local region of the input image that influences an output element), stacking small convolution kernels is preferable to using a single large kernel: it increases the network depth, allowing more complex patterns to be learned, at a relatively small cost in parameters. Replacing one 5×5 kernel with two stacked 3×3 kernels keeps the receptive field unchanged while making the network deeper, which tends to improve performance.
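A rough way to see the parameter saving (a standalone sketch; the channel count C is an arbitrary example): for C input and C output channels, one 5×5 convolution has 25·C² weights, while two stacked 3×3 convolutions have 2·9·C² = 18·C² weights with the same 5×5 receptive field:

import torch.nn as nn

C = 16  # example channel count
one_5x5 = nn.Conv2d(C, C, kernel_size=5, bias=False)
two_3x3 = nn.Sequential(nn.Conv2d(C, C, 3, bias=False),
                        nn.Conv2d(C, C, 3, bias=False))

count = lambda m: sum(p.numel() for p in m.parameters())
print(count(one_5x5))  # 25 * 16 * 16 = 6400
print(count(two_3x3))  # 18 * 16 * 16 = 4608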

3. Improving LeNet based on the NiN structure

[Figure: NiN structure]
The input and output of a convolutional layer are usually four-dimensional arrays (sample, channel, height, width), while the input and output of a fully connected layer are usually two-dimensional arrays (sample, feature). To place a convolutional layer after a fully connected layer, the fully connected layer's output would have to be transformed back to four dimensions. A 1×1 convolutional layer can be viewed as a fully connected layer in which each element of the spatial dimensions (height and width) acts as a sample and each channel acts as a feature. NiN uses 1×1 convolutional layers in place of fully connected layers, so that spatial information can be passed naturally to subsequent layers.
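A minimal sketch of this equivalence (tensor sizes are arbitrary examples): a 1×1 convolution applied to an (N, C, H, W) tensor gives the same result as a linear layer applied independently at every spatial position, once the weights are shared:

import torch
import torch.nn as nn

N, C_in, C_out, H, W = 2, 16, 8, 5, 5
x = torch.randn(N, C_in, H, W)

conv1x1 = nn.Conv2d(C_in, C_out, kernel_size=1)
fc = nn.Linear(C_in, C_out)
fc.weight.data = conv1x1.weight.data.view(C_out, C_in)  # share the same weights
fc.bias.data = conv1x1.bias.data

y_conv = conv1x1(x)                                   # (N, C_out, H, W)
y_fc = fc(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)  # each pixel treated as a sample
print(torch.allclose(y_conv, y_fc, atol=1e-6))        # True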
As with the AlexNet-based improvement, the input images are first resized to 32×32.

transform = transforms.Compose(
    [transforms.Resize(32),
     transforms.ToTensor()])

mnist_train = torchvision.datasets.FashionMNIST(
    root="./DataSets/FashionMNIST",
    train=True,
    download=True,
    transform=transform)

mnist_test = torchvision.datasets.FashionMNIST(
    root="./DataSets/FashionMNIST",
    train=False,
    download=True,
    transform=transform)

Full code:

import os
import torch
import torch.nn as nn
import sys
import time
import d2lzh_pytorch as d2l

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(torch.__version__)
print(device)
start = time.time()

def nin_block(in_channels, out_channels, kernel_size, strides, padding):
    blk = nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU(),
        nn.Conv2d(out_channels, out_channels, kernel_size=1),
        nn.ReLU()
    )
    return blk
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv = nn.Sequential(
            nin_block(1, 6, 5, 1, 0),
            nn.MaxPool2d(2,2),
            nin_block(6, 16, 5, 1, 0),
            nn.MaxPool2d(2, 2),
            nin_block(16, 120, 5, 1, 0)
        )
        self.fc = nn.Sequential(
            nn.Linear(120, 10))

    def forward(self, img):
        feature = self.conv(img)
        output = self.fc(feature.view(img.shape[0], -1))
        return output
net = LeNet()
print(net)
batch_size = 128
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size, resize=32)  # resize to 32x32 so the 5x5 NiN blocks fit

def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode: this disables dropout
                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train()  # switch back to training mode
            else:  # custom model (not used after section 3.13; GPU not considered)
                if('is_training' in net.__code__.co_varnames):  # if the model takes an is_training argument
                    # set is_training to False
                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n
lr, num_epochs = 0.001, 10
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
#train
net = net.to(device)
print("training on ", device)
loss = torch.nn.CrossEntropyLoss()
batch_count = 0
total_time = 0
for epoch in range(num_epochs):
    train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
    for X, y in train_iter:
        X = X.to(device)
        y = y.to(device)
        y_hat = net(X)
        l = loss(y_hat, y)
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
        train_l_sum += l.cpu().item()
        train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
        n += y.shape[0]
        batch_count += 1
    test_acc = evaluate_accuracy(test_iter, net)
    total_time += round(time.time()-start,2)
    print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
            % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))

end = time.time()
print('total time:%.2f sec'%(total_time))