PyTorch (7): CNN (Convolutional Neural Network)

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision  # computer-vision datasets, models, and image transforms
import matplotlib.pyplot as plt


# Hyperparameters
EPOCH = 1  # to save time, train over the data only once
BATCH_SIZE = 50  # train on 50 samples per batch
LR = 0.001  # learning rate
DOWNLOAD_MNIST = True  # set to False if the MNIST dataset has already been downloaded


# Download the MNIST digit dataset and convert it to tensors
train_data = torchvision.datasets.MNIST(
    root='./mnist/',  # store the downloaded data in the ./mnist/ folder under the current directory
    train=True,  # this is the training split
    transform=torchvision.transforms.ToTensor(),  # convert each image to a tensor
    download=DOWNLOAD_MNIST,  # one of the hyperparameters above
)
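# Quick sanity check: ToTensor yields a (1, 28, 28) FloatTensor with pixel
# values scaled to [0, 1] (sample_img/sample_label are illustrative names).
sample_img, sample_label = train_data[0]
print(sample_img.shape, sample_img.min().item(), sample_img.max().item(), sample_label)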


# Show one of the images
print('train_data.size:', train_data.data.size())  # train_data is the dataset variable; .data holds the raw image tensor
print('train_labels.size:', train_data.targets.size())  # .targets holds the labels
# plt.imshow() displays an image; cmap='gray' renders it in grayscale
plt.imshow(train_data.data[2].numpy(), cmap='gray')
plt.title('%i' % train_data.targets[2])  # the label index must match the image index
plt.show()


# Put the dataset into a data loader that yields shuffled mini-batches
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
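# Quick shape check: each iteration of the loader yields a batch of
# BATCH_SIZE images and their labels.
for x, y in train_loader:
    print(x.shape, y.shape)  # torch.Size([50, 1, 28, 28]) torch.Size([50])
    break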


# Load the test split
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# Take the first 2000 test samples. unsqueeze adds a channel dimension,
# (2000, 28, 28) -> (2000, 1, 28, 28), and dividing by 255 scales pixels to [0, 1].
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000] / 255.
test_y = test_data.targets[:2000]
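# Equivalently (a sketch of an alternative, not required): the test split could be
# built with the same ToTensor transform and read through a DataLoader instead of
# normalizing test_data.data by hand:
# test_data = torchvision.datasets.MNIST(root='./mnist/', train=False,
#                                        transform=torchvision.transforms.ToTensor())
# test_loader = Data.DataLoader(dataset=test_data, batch_size=BATCH_SIZE)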


# Define the convolutional neural network
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(  # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,  # input channels; grayscale images have a single channel
                out_channels=16,  # n_filters: 16 filters, each extracting a different kind of feature
                kernel_size=5,  # each filter is 5x5
                stride=1,  # the filter moves 1 pixel per step
                # padding: pad the border with zeros; with stride=1,
                # padding = (kernel_size - 1) / 2 keeps the spatial size unchanged
                padding=2,
            ),  # output shape (16, 28, 28)
            nn.ReLU(),  # activation function
            nn.MaxPool2d(kernel_size=2),  # downsample: keep the max value in each 2x2 area, output shape (16, 14, 14)
        )
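        # Output-size check (a worked example):
        # conv output = (in - kernel + 2*padding) / stride + 1 = (28 - 5 + 2*2) / 1 + 1 = 28,
        # and the 2x2 max pool then halves 28 to 14, giving (16, 14, 14).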
        self.conv2 = nn.Sequential(  # input shape (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),  # 16 -> 32 channels, output shape (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(2),  # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)  # fully connected layer, outputs scores for 10 classes
        
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # flatten the conv2 output to (batch_size, 32 * 7 * 7)
        output = self.out(x)
        return output


# Print the network structure
cnn = CNN()
print(cnn)
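# Optional shape check: a dummy batch (illustrative) confirms the network
# maps (N, 1, 28, 28) inputs to (N, 10) class scores.
dummy_x = torch.randn(2, 1, 28, 28)
print(cnn(dummy_x).shape)  # torch.Size([2, 10])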


# Train the network
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)  # optimize all of the network's parameters
loss_func = nn.CrossEntropyLoss()  # cross-entropy loss for multi-class classification
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        b_x = x  # batch of images, shape (50, 1, 28, 28)
        b_y = y  # batch of labels, shape (50,)

        output = cnn(b_x)  # forward pass
        loss = loss_func(output, b_y)
        optimizer.zero_grad()  # clear the gradients from the previous step
        loss.backward()  # backpropagate to compute new gradients
        optimizer.step()  # apply the gradients
 
        if step % 100 == 0:
            with torch.no_grad():  # no gradients needed for evaluation
                test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1]  # index of the max score = predicted class
            accuracy = float((pred_y == test_y).sum()) / test_y.size(0)
            print('Epoch:', epoch, '|Step:', step,
                  '|train loss:%.4f' % loss.item(), '|test accuracy:%.4f' % accuracy)
        
        
# Test on the first 10 digits
with torch.no_grad():
    test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].numpy()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')
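# Optionally, plot the 10 test digits alongside the predicted labels:
fig, axes = plt.subplots(1, 10, figsize=(10, 2))
for i, ax in enumerate(axes):
    ax.imshow(test_x[i][0].numpy(), cmap='gray')
    ax.set_title('%i' % pred_y[i])
    ax.axis('off')
plt.show()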

Run results:

1. One of the images from the MNIST dataset: [figure omitted]

2. Training output of the network: [figure omitted]
