General steps for building a neural network with PyTorch

  • Prepare the input and target data
  • Define a model
  • Define a loss function and an optimizer
  • Run the training loop

A simple example:

import torch
import torch.nn as nn

# N: batch size; D_in: input dimension; H: hidden size; D_out: output dimension
N, D_in, H, D_out = 64, 1000, 100, 10

x = torch.randn(N, D_in)   # random inputs
y = torch.randn(N, D_out)  # random targets

class TwoLayerNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = nn.Linear(D_in, H, bias=False)
        self.linear2 = nn.Linear(H, D_out, bias=False)
        
    def forward(self, x):
        # clamp(min=0) applies a ReLU activation between the two linear layers
        y_pred = self.linear2(self.linear1(x).clamp(min=0))
        return y_pred
    
    
# Move the data and the model onto the GPU if one is available
if torch.cuda.is_available():
    x = x.cuda()
    y = y.cuda()
    model = TwoLayerNet(D_in, H, D_out).cuda()
else:
    model = TwoLayerNet(D_in, H, D_out)
    
# Loss function and optimizer
loss_fn = nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)

for it in range(500):
    # Forward pass
    y_pred = model(x)
    
    # Compute the loss
    loss = loss_fn(y_pred, y)
    print(it, loss.item())
    
    # Backward pass
    optimizer.zero_grad()  # zero the gradients so they do not accumulate across iterations
    loss.backward()        # backpropagation: compute gradients of the loss w.r.t. the parameters

    # Update the model parameters
    optimizer.step()
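
After the loop finishes, the trained network can be checked without tracking gradients. A minimal sketch, reusing the model, x, y, and loss_fn defined above:

# Evaluate the final loss without building a computation graph
with torch.no_grad():
    final_loss = loss_fn(model(x), y)
    print('final training loss:', final_loss.item())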
    
    


Training results: the loss printed at each iteration drops steadily over the 500 iterations.
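
The cuda() branching in the example can also be written in a device-agnostic style with torch.device; a minimal sketch, assuming the same x, y, and TwoLayerNet as above:

# Pick the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = TwoLayerNet(D_in, H, D_out).to(device)
x = x.to(device)
y = y.to(device)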

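The same two-layer network can also be defined with nn.Sequential instead of a custom nn.Module subclass; a minimal sketch, with nn.ReLU() playing the role of .clamp(min=0) in the forward() above:

import torch.nn as nn

D_in, H, D_out = 1000, 100, 10

# Linear -> ReLU -> Linear, with the bias terms disabled as in TwoLayerNet
model = nn.Sequential(
    nn.Linear(D_in, H, bias=False),
    nn.ReLU(),
    nn.Linear(H, D_out, bias=False),
)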