Building an MLP in PyTorch for MNIST Handwritten Digit Recognition

Basic workflow

1. Load the dataset

2. Preprocess the data (normalize it and convert it to tensors)

3. Do some research: check whether others have already tackled this problem and which model architectures they used, then define the model

4. Choose a loss function and an optimizer, then train the model

5. Test the model on data it has never seen

The code in this article was run on Google Colab.

from torchvision import datasets
import torchvision.transforms as transforms
import torch

# use 0 for single-process (non-parallel) data loading
num_workers = 0
# how many images to read per batch
batch_size = 20

# convert the images to tensors
transform = transforms.ToTensor()


# download the data
train_data = datasets.MNIST(root = './drive/data',train = True,
                           download = True,transform = transform)
# train = False selects the held-out 10,000-image test split
test_data = datasets.MNIST(root = './drive/data',train = False,
                          download = True,transform = transform)

# create the data loaders
train_loader = torch.utils.data.DataLoader(train_data,batch_size = batch_size,
                                           num_workers = num_workers)
test_loader = torch.utils.data.DataLoader(test_data,batch_size = batch_size,
                                         num_workers = num_workers)
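
Step 2 of the workflow mentions normalization, but the transform above only converts images to tensors. Below is a minimal sketch that also normalizes; 0.1307 and 0.3081 are the commonly quoted MNIST mean and standard deviation, so treat those specific values as an assumption rather than part of the original code.

# optional: normalize in addition to converting to tensors
transform = transforms.Compose([
    transforms.ToTensor(),
    # 0.1307/0.3081 are the commonly quoted MNIST mean/std (assumed values)
    transforms.Normalize((0.1307,), (0.3081,)),
])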

The visualization step that follows is optional.

# visualize a batch of images
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

dataiter = iter(train_loader)
images,labels = next(dataiter)
images = images.numpy()

fig = plt.figure(figsize = (25,4))
for idx in np.arange(20):# we read 20 images per batch, so show all 20
  ax = fig.add_subplot(2,20 // 2,idx + 1,xticks = [],yticks = [])
  ax.imshow(np.squeeze(images[idx]),cmap = 'gray')
  
  ax.set_title(str(labels[idx].item()))

Below is the output, which confirms the data has been loaded:

[Figure: the first 20 MNIST digits in a 2×10 grid, each titled with its label]

Next we define our model.

# define the MLP model

import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
  def __init__(self):
    super(Net,self).__init__()
    
    # two fully connected hidden layers and one output layer.
    # each 28*28 image is flattened into a 784-dim input vector; the output
    # layer has 10 units because the 10 digits are 10 classes. The network
    # produces a score per class, and the highest-scoring class is the prediction.
    hidden_1 = 512
    hidden_2 = 512
    self.fc1 = nn.Linear(28 * 28,hidden_1)
    self.fc2 = nn.Linear(hidden_1,hidden_2)
    self.fc3 = nn.Linear(hidden_2,10)
    # dropout to reduce overfitting
    self.dropout = nn.Dropout(0.2)
  def forward(self,x):
    x = x.view(-1,28 * 28)
    x = F.relu(self.fc1(x))
    x = self.dropout(x)
    
    x = F.relu(self.fc2(x))
    
    x = self.dropout(x)
    x = self.fc3(x)
#     x = F.log_softmax(x,dim = 1)
    
    return x
  
model = Net()
# print the model to check the architecture
print(model)
# define the loss function and the optimizer

# criterion = nn.NLLLoss()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(params = model.parameters(),lr = 0.01)
# training
n_epochs = 50

for epoch in range(n_epochs):
  train_loss = 0.0
  
  
  for data,target in train_loader:
    optimizer.zero_grad()
    output = model(data)# forward pass: get the predictions
    
    loss = criterion(output,target)
    loss.backward()
    
    optimizer.step()
    train_loss += loss.item()*data.size(0)
  train_loss = train_loss / len(train_loader.dataset)
  print('Epoch:  {}  \tTraining Loss: {:.6f}'.format(
    epoch + 1,
    train_loss))
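
A side note on the commented-out log_softmax and nn.NLLLoss lines above: nn.CrossEntropyLoss applies log-softmax internally before computing the negative log-likelihood, so the model can return raw scores (logits) and the two setups are equivalent. A quick runnable check of that equivalence (the tensor values here are made up):

logits = torch.randn(4, 10)          # a fake batch of raw class scores
target = torch.tensor([1, 0, 4, 9])  # fake labels
ce = nn.CrossEntropyLoss()(logits, target)
nll = nn.NLLLoss()(F.log_softmax(logits, dim = 1), target)
print(torch.allclose(ce, nll))       # True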

Now comes testing. We evaluate the trained model on the test data, count the correct predictions, and compute the accuracy for each digit as well as the overall accuracy.

# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))

model.eval() # prep model for *evaluation*

with torch.no_grad(): # gradients are not needed during evaluation
    for data, target in test_loader:
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update test loss
        test_loss += loss.item()*data.size(0)
        # convert output scores to the predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to the true labels
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # accumulate per-class accuracy counts
        for i in range(len(target)): # len(target) also handles a smaller final batch
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))

for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            str(i), 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))

print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
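
As a complement to the batch evaluation above, here is a minimal sketch of classifying a single image with the trained model (picking test_data[0] is just an illustrative choice):

# classify one image from the test set (illustrative sketch)
model.eval()
img, label = test_data[0]            # img has shape (1, 28, 28)
with torch.no_grad():
    logits = model(img.unsqueeze(0)) # add a batch dimension
    pred = logits.argmax(dim = 1).item()
print('predicted:', pred, 'actual:', label)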

Here are the results from my run:

Out:
Test Loss: 0.003499

Test Accuracy of     0: 100% (5923/5923)
Test Accuracy of     1: 99% (6740/6742)
Test Accuracy of     2: 99% (5955/5958)
Test Accuracy of     3: 99% (6125/6131)
Test Accuracy of     4: 99% (5841/5842)
Test Accuracy of     5: 100% (5421/5421)
Test Accuracy of     6: 100% (5918/5918)
Test Accuracy of     7: 99% (6264/6265)
Test Accuracy of     8: 99% (5850/5851)
Test Accuracy of     9: 99% (5947/5949)

Test Accuracy (Overall): 99% (59984/60000)

Preventing overfitting with a validation set

To guard against overfitting, there is another technique we can use: train the model on the training set, monitor it on a separate validation set (if validation performance was improving and then starts to degrade, overfitting has probably set in), and reserve the test set for the final evaluation.
The reason we need a dedicated validation set instead of validating on the test set is that model selection is biased toward whatever data it validates on: the models and parameters that happen to do well on the validation set are the ones that get kept. If we selected models on the test set, the final test score would be optimistically biased and would no longer measure how well the model generalizes.

Below is the code with a validation set added:

from torchvision import datasets
import torchvision.transforms as transforms
import torch
import numpy as np # needed below for shuffling the indices

# for splitting the dataset
from torch.utils.data.sampler import SubsetRandomSampler

num_workers = 0

batch_size = 20

# hold out part of the training data as a validation set to detect overfitting
valid_size = 0.2


transform = transforms.ToTensor()

train_data = datasets.MNIST(root = './drive/data',train = True,
                           download = True,transform = transform)
# train = False selects the held-out 10,000-image test split
test_data = datasets.MNIST(root = './drive/data',train = False,
                          download = True,transform = transform)


num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx,valid_idx = indices[split:],indices[:split]

train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = torch.utils.data.DataLoader(train_data,batch_size = batch_size,
                            sampler = train_sampler,num_workers = num_workers)
valid_loader = torch.utils.data.DataLoader(train_data,batch_size = batch_size,
                            sampler = valid_sampler,num_workers = num_workers)
test_loader = torch.utils.data.DataLoader(test_data,batch_size = batch_size,
                                         num_workers = num_workers)
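
As an aside, torch.utils.data.random_split can do the same job as the manual index shuffling above; here is a sketch of that alternative (behaviorally equivalent up to randomness):

# alternative split using random_split (a sketch, not the original approach)
from torch.utils.data import random_split
n_valid = int(valid_size * len(train_data))
train_subset, valid_subset = random_split(train_data,
                                          [len(train_data) - n_valid, n_valid])
# the resulting subsets can be passed to DataLoader directly, without samplers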

# define the MLP model (same as before)

import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
  def __init__(self):
    super(Net,self).__init__()
    
    hidden_1 = 512
    hidden_2 = 512
    self.fc1 = nn.Linear(28 * 28,hidden_1)
    self.fc2 = nn.Linear(hidden_1,hidden_2)
    self.fc3 = nn.Linear(hidden_2,10)
    
    self.dropout = nn.Dropout(0.2)
  def forward(self,x):
    x = x.view(-1,28 * 28)
    x = F.relu(self.fc1(x))
    x = self.dropout(x)
    
    x = F.relu(self.fc2(x))
    
    x = self.dropout(x)
    
    x = self.fc3(x)
#     x = F.log_softmax(x,dim = 1)
    
    return x
  
model = Net()
print(model)
# define the loss function and the optimizer

# criterion = nn.NLLLoss()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(params = model.parameters(),lr = 0.01)
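
Plain SGD with lr = 0.01 works here but converges slowly; SGD with momentum or Adam are common alternatives (my suggestion, not part of the original recipe):

# alternatives to plain SGD (illustrative, not from the original)
# optimizer = torch.optim.SGD(params = model.parameters(),lr = 0.01,momentum = 0.9)
# optimizer = torch.optim.Adam(params = model.parameters(),lr = 0.001)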

If the training loss is still decreasing but the validation loss starts to rise, the model is probably overfitting.
As long as the validation loss keeps reaching new lows, we save a checkpoint of the model for later use.

n_epochs = 50

valid_loss_min = np.inf # best (lowest) validation loss seen so far

for epoch in range(n_epochs):
  train_loss = 0.0
  valid_loss = 0.0
  
  for data,target in train_loader:
    optimizer.zero_grad()
    output = model(data)# forward pass: get the predictions
    
    loss = criterion(output,target)
    loss.backward()
    
    optimizer.step()
    train_loss += loss.item()*data.size(0)
  
  # compute the validation loss; no backpropagation is needed here
  model.eval() # switch off dropout for evaluation
  with torch.no_grad(): # gradients are not needed either
    for data,target in valid_loader:
      output = model(data)
      loss = criterion(output,target)
      valid_loss += loss.item() * data.size(0)
  model.train() # back to training mode for the next epoch
  
  # divide by the number of samples each sampler actually covers
  train_loss = train_loss / len(train_loader.sampler)
  valid_loss = valid_loss / len(valid_loader.sampler)
  print('Epoch:  {}  \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
    epoch + 1,
    train_loss,
    valid_loss))
  if valid_loss <= valid_loss_min:# save a checkpoint when validation loss hits a new low
    print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model...'.format(
    valid_loss_min,
    valid_loss))
    torch.save(model.state_dict(),'model.pt')
    valid_loss_min = valid_loss
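
The loop above checkpoints the best model but still runs all 50 epochs. A common extension is early stopping: quit once the validation loss has gone a fixed number of epochs without improving. A minimal sketch follows; the helper name and the patience value are my own choices, not from the original.

def should_stop(valid_loss, state, patience = 5):
    # returns True once valid_loss has failed to improve for `patience`
    # consecutive epochs; `state` carries 'best' and 'bad_epochs' between calls
    if valid_loss <= state['best']:
        state['best'] = valid_loss
        state['bad_epochs'] = 0
    else:
        state['bad_epochs'] += 1
    return state['bad_epochs'] >= patience

# usage inside the epoch loop, right after valid_loss is computed:
# state = {'best': np.inf, 'bad_epochs': 0}  # initialize before the loop
# if should_stop(valid_loss, state): break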

This step loads the best model saved earlier:

model.load_state_dict(torch.load('model.pt'))
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))

model.eval() # prep model for *evaluation*

with torch.no_grad(): # gradients are not needed during evaluation
    for data, target in test_loader:
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the loss
        loss = criterion(output, target)
        # update test loss
        test_loss += loss.item()*data.size(0)
        # convert output scores to the predicted class
        _, pred = torch.max(output, 1)
        # compare predictions to the true labels
        correct = np.squeeze(pred.eq(target.data.view_as(pred)))
        # accumulate per-class accuracy counts
        for i in range(len(target)): # len(target) also handles a smaller final batch
            label = target.data[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))

for i in range(10):
    if class_total[i] > 0:
        print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
            str(i), 100 * class_correct[i] / class_total[i],
            np.sum(class_correct[i]), np.sum(class_total[i])))
    else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))

print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
    100. * np.sum(class_correct) / np.sum(class_total),
    np.sum(class_correct), np.sum(class_total)))
Out:
Test Loss: 0.019485

Test Accuracy of     0: 99% (5914/5923)
Test Accuracy of     1: 99% (6716/6742)
Test Accuracy of     2: 99% (5929/5958)
Test Accuracy of     3: 99% (6086/6131)
Test Accuracy of     4: 99% (5822/5842)
Test Accuracy of     5: 99% (5393/5421)
Test Accuracy of     6: 99% (5902/5918)
Test Accuracy of     7: 99% (6231/6265)
Test Accuracy of     8: 99% (5822/5851)
Test Accuracy of     9: 99% (5914/5949)

Test Accuracy (Overall): 99% (59729/60000)

Looking at these numbers, the result is actually slightly worse than the previous model's. That is fine: the earlier run already used dropout to limit overfitting, and this task has relatively few features and a simple model. Once the features multiply and models grow more complex, the value of a validation set really shows.
