In the earlier article "PyTorch入門:使用PyTorch搭建神經網絡LeNet5" (Getting Started with PyTorch: Building the LeNet5 Neural Network), we implemented the simple LeNet5 network in PyTorch. This article walks through the full workflow of defining, training, and testing an image-classification model with LeNet5 on CIFAR10. The complete code, with detailed comments, follows:
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
## Build the network: LeNet5 with its input changed to 3 channels (CIFAR10 images are RGB)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
        self.fc1 = nn.Linear(in_features=16*5*5, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # 32x32 -> 28x28 -> 14x14
        x = self.pool(F.relu(self.conv2(x)))  # 14x14 -> 10x10 -> 5x5
        x = x.view(-1, 16*5*5)                # flatten: (N, 16, 5, 5) -> (N, 400)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)                       # raw logits; the loss applies softmax internally
        return x
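## (Optional sketch) Verify where 16*5*5 comes from by tracing shapes on a dummy input;
## the variable name below is illustrative and not part of the original script.
# _dummy = torch.randn(1, 3, 32, 32)  # one fake CIFAR10-sized image
# print(Net()(_dummy).size())         # expected: torch.Size([1, 10])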
## Normalization: ToTensor converts each PIL image to a (C,H,W) float tensor with values in [0,1];
## Normalize then maps each channel via (x - 0.5) / 0.5, so the value range becomes [-1,1]
normalization = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
## Load the CIFAR10 dataset
train_set = torchvision.datasets.CIFAR10(root='./data', train=True, transform=normalization, download=True)
train_set_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=4, shuffle=True, num_workers=0)  # on Windows, num_workers=0 is recommended
test_set = torchvision.datasets.CIFAR10(root='./data', train=False, transform=normalization, download=True)
test_set_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=4, shuffle=False, num_workers=0)  # on Windows, num_workers=0 is recommended
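## (Optional sketch) Quick check of the dataset sizes and the normalized value range,
## assuming the datasets above have been created.
# print(len(train_set), len(test_set))          # 50000 10000
# _img, _ = train_set[0]
# print(_img.min().item(), _img.max().item())   # values lie within [-1, 1]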
# ## Show some images from CIFAR10
# def imshow(img):
#     # print(img.size())
#     img = img / 2 + 0.5  # unnormalize: [-1,1] => [0,1]
#     img = img.numpy()
#     plt.imshow(np.transpose(img, (1, 2, 0)))  # tensor (C,H,W) => matplotlib (H,W,C)
#     plt.show()
# classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# data_iter = iter(train_set_loader)
# images, labels = next(data_iter)  # images and labels are both tensors; the old .next() method no longer works
# # print(images.size())
# # print(labels.size())
# imshow(torchvision.utils.make_grid(images))
# print(' '.join('%s' % classes[labels[j]] for j in range(len(labels))))
## Define the network, the loss function, and the optimizer
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(params=net.parameters(), lr=0.001, momentum=0.9) # SGD with momentum
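## Note: nn.CrossEntropyLoss combines LogSoftmax and NLLLoss internally, which is why
## forward() returns raw logits without a softmax. A minimal (illustrative) check:
# _logits = torch.randn(4, 10)           # fake batch of 4 predictions
# _targets = torch.tensor([3, 8, 1, 0])  # fake class indices
# print(criterion(_logits, _targets))    # a scalar loss tensor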
## Train the network
print('Training Started')
for epoch in range(5):  # one epoch passes over the entire training set once
    running_loss = 0.0  # accumulated loss, printed periodically to monitor training
    for i, data in enumerate(train_set_loader):
        # get one mini-batch of inputs and labels
        inputs, labels = data
        # zero the gradient buffers
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # report the average loss of every 2000 mini-batches
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%3d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
## Save the model parameters (the state_dict holds only the learned weights, not the class definition)
torch.save(net.state_dict(), './data/LeNet5.pt')
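## (Optional) torch.save can also pickle the whole module, e.g. torch.save(net, path),
## but saving the state_dict as above is the recommended, more portable approach.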
## Test the model
print('Testing Started')
net_new = Net()
net_new.load_state_dict(torch.load('./data/LeNet5.pt'))
correct = 0
total = 0
with torch.no_grad():  # no gradients needed for evaluation
    for data in test_set_loader:
        images, labels = data
        _, predictions = torch.max(net_new(images), 1)  # index of the highest logit per image
        total += labels.size(0)
        correct += (predictions == labels).sum().item()
print('Accuracy: %d/%d = %.2f%%' % (correct, total, correct / total * 100))
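## (Optional sketch) A small extension: per-class accuracy, assuming the classes tuple
## from the commented-out visualization block above is defined.
# class_correct = [0] * 10
# class_total = [0] * 10
# with torch.no_grad():
#     for images, labels in test_set_loader:
#         _, predictions = torch.max(net_new(images), 1)
#         for label, prediction in zip(labels, predictions):
#             class_total[label] += 1
#             class_correct[label] += (prediction == label).item()
# for idx in range(10):
#     print('%5s: %.2f%%' % (classes[idx], class_correct[idx] / class_total[idx] * 100))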
"""Explore:
使用GPU後會發現速度並沒有增加很多,原因是LeNet這個模型非常小。
如果將模型寬度增大(增加2個卷積層的卷積核數量),GPU對模型的加速效果會是怎麼樣的呢?
"""
Github (github.com): @chouxianyu
Github Pages (github.io): @臭鹹魚
Zhihu (zhihu.com): @臭鹹魚
Cnblogs (cnblogs.com): @臭鹹魚
Bilibili (bilibili.com): @絕版臭鹹魚
WeChat Official Account: @臭鹹魚
Please credit the source when reposting; discussion and exchange are welcome!