datawhale訓練營pytorch第四次作業-用pytorch實現多層網絡

【Task4(2天)】用PyTorch實現多層網絡

1.引入模塊,讀取數據

2.構建計算圖(構建網絡模型)

3.損失函數與優化器

4.開始訓練模型

5.對訓練的模型預測結果進行評估

 

1.引入模塊,讀取數據:

# 1. Imports: PyTorch for the model, pandas/sklearn for data handling.
import torch
import numpy as np
from torch import nn
from torch.autograd import Variable   # NOTE(review): Variable is a deprecated no-op since PyTorch 0.4
import torch.nn.functional as F
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# IPython magic: render matplotlib figures inline in the notebook.
%matplotlib inline
# Diabetes-style dataset; presumably 8 feature columns plus an 'Outcome' label — TODO confirm against the CSV.
data = pd.read_csv('test_data.csv')

2.數據預處理:

# 2. Split features/labels and standardize the features.
data1 = data.copy()
y = data1.loc[:, ['Outcome']]   # labels: the 'Outcome' column (kept as a DataFrame)
del data1['Outcome']
x = data1                       # remaining columns are the features
# 30% held out for testing; fixed random_state for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=2018)

ss = StandardScaler()
x_train = ss.fit_transform(x_train)
# FIX: scale the test set with the statistics fitted on the training set.
# The original called fit_transform here too, which refits the scaler on the
# test data — a data-leakage bug that makes train/test features inconsistent.
x_test = ss.transform(x_test)
3.數據轉化爲Tensor:

# 3. Convert the split data to PyTorch tensors.
# Labels come back from train_test_split as DataFrames, so go via ndarray first.
y_train_numpy = np.array(y_train)
y_test_numpy = np.array(y_test)
# Features are already ndarrays after StandardScaler.
x_train_tensor = torch.from_numpy(x_train)
x_test_tensor = torch.from_numpy(x_test)
y_train_tensor = torch.from_numpy(y_train_numpy)
y_test_tensor = torch.from_numpy(y_test_numpy)
# float32 views consumed by the training loop below.
x = x_train_tensor.float()
y = y_train_tensor.float()
# Build the computation graph (define the network model).

class module_net(nn.Module):
    """Fully connected net: three Tanh-activated hidden layers, linear output.

    The final layer emits raw logits (no activation), matching the
    BCEWithLogitsLoss used for training.
    """

    def __init__(self, num_input, num_hidden, num_output):
        super(module_net, self).__init__()
        # input -> hidden
        self.layer1 = nn.Linear(num_input, num_hidden)
        self.layer2 = nn.Tanh()
        # hidden -> hidden
        self.layer3 = nn.Linear(num_hidden, num_hidden)
        self.layer4 = nn.Tanh()
        # hidden -> hidden
        self.layer5 = nn.Linear(num_hidden, num_hidden)
        self.layer6 = nn.Tanh()
        # hidden -> output logits
        self.layer7 = nn.Linear(num_hidden, num_output)

    def forward(self, x):
        """Run the seven layers in order and return the output logits."""
        for layer in (self.layer1, self.layer2, self.layer3, self.layer4,
                      self.layer5, self.layer6, self.layer7):
            x = layer(x)
        return x
# Loss function: BCEWithLogitsLoss fuses a sigmoid with binary cross-entropy,
# so the network must output raw logits (which module_net does).
criterion=nn.BCEWithLogitsLoss()    
mo_net = module_net(8, 10, 1)   # 8 input features, 10 hidden units, 1 output logit
optim = torch.optim.SGD(mo_net.parameters(), 0.01, momentum=0.9)  # SGD, lr=0.01, momentum 0.9

4.開始訓練模型

# 4. Train the model.
Loss_list = []       # per-epoch loss values, for plotting afterwards
Accuracy_list = []   # per-epoch training accuracy, for plotting afterwards
num_epochs = 10000
for e in range(num_epochs):
    # Call the module directly; nn.Module.__call__ invokes forward() plus hooks.
    # (The deprecated Variable wrapper is dropped — it is a no-op since PyTorch 0.4.)
    out = mo_net(x)                       # raw logits, shape (N, 1)
    loss = criterion(out, y)
    # FIX: loss.data[0] raises on 0-dim tensors in modern PyTorch; .item()
    # is the supported way to extract a Python float from a scalar tensor.
    Loss_list.append(loss.item())
    # -------------------- training accuracy --------------------
    # logit > 0  <=>  sigmoid(logit) > 0.5, so threshold logits at 0.
    out_class = (out > 0).float()
    right_num = torch.sum(y == out_class).float()   # count of correct predictions
    precision = (right_num / out.shape[0]).item()   # store a float, not a tensor
    # -----------------------------------------------------------
    Accuracy_list.append(precision)
    optim.zero_grad()
    loss.backward()
    optim.step()
    if (e + 1) % 1000 == 0:
        print('epoch: {}, loss: {},precision{},right_num{}'.format(e+1, loss.item(), precision, right_num))
# FIX: the original plotted against an undefined name `x1` (NameError);
# plot against the epoch index instead.
plt.plot(range(num_epochs), Loss_list, c='red', label='loss')
plt.plot(range(num_epochs), Accuracy_list, c='blue', label='precision')
plt.legend()
 

5.模型評估

# 5. Evaluate the trained model on the held-out test set.
x_test_tensor = x_test_tensor.float()
y_test_tensor = y_test_tensor.float()
out_test = mo_net(x_test_tensor)            # call the module, not .forward(); logits of shape (N_test, 1)
loss_test = criterion(out_test, y_test_tensor)
out_test_class = (out_test > 0).float()     # logit > 0 <=> sigmoid(logit) > 0.5
right_num_test = torch.sum(y_test_tensor == out_test_class).float()   # correct predictions
precision_test = right_num_test / out_test.shape[0]                   # test accuracy
# FIX: .data[0] raises on 0-dim tensors in modern PyTorch; use .item().
loss_test = loss_test.item()

print('loss_test:{},precision_test:{},right_num_test:{}'.format(loss_test,precision_test,right_num_test))
 

 

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章