PyTorch Learning (3): Implementing a Linear Regression Model in PyTorch

Getting Started with PyTorch: Implementing Linear Regression

This approach follows the Dive into Deep Learning (PyTorch edition) GitHub project.

We solve a simple linear regression problem in PyTorch using gradient descent.

Problem Statement

For the model $ y = w_1x_1 + w_2x_2 + b $, the true parameter values are $w = [2, -3.4]$ and $b = 4.2$. The task is to create simulated data, build a linear regression model, and recover the parameters by gradient descent.
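
In vectorized form, stacking the $n$ samples row by row into a matrix $X \in \mathbb{R}^{n \times 2}$, the model reads

$ \hat{y} = Xw + b, \quad w = [w_1, w_2]^\top $

which is exactly the X.mm(w) + b computation used in the code below.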

Experimental Procedure

  1. Create the $x$ and $y$ data: randomly generate 1000 samples of $x$, compute the labels $y$ from the formula, add random noise, and write a batched data reader
  2. Build the linear regression computation, the loss function, and the SGD update step (see the update rule after this list), and initialize the parameters $w$ and $b$
  3. Run the iterative gradient descent computation
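
For reference, the minibatch SGD update implemented in step 2 is

$ \theta \leftarrow \theta - \frac{\eta}{|\mathcal{B}|} \sum_{i \in \mathcal{B}} \nabla_\theta \ell^{(i)}(\theta) $

where $\eta$ is the learning rate and $\mathcal{B}$ is a minibatch; the division by $|\mathcal{B}|$ is why the SGD function in the code below divides the gradient of the summed loss by batch_size.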

Code Implementation

Method 1: Build the wheel before you ride it (everything from scratch)

import torch
import numpy as np
import random

true_w = [2.0, -3.4]
true_b = 4.2

num_inputs = 2
num_examples = 1000

# Build 1000 samples
features = torch.randn(num_examples, num_inputs, dtype=torch.float32)
labels = features.mm(torch.tensor(true_w).view(2, 1)) + true_b
# Add Gaussian noise with mean 0 and standard deviation 0.01 to y, so there is something to fit
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float32)

# Build the data in batch-sized chunks
batch_size = 10
def batch_iter(batch_size, features, labels):
  nums = len(features)
  indices = list(range(nums))
  random.shuffle(indices)
  for i in range(0, nums, batch_size):
    j = torch.LongTensor(indices[i: min(i + batch_size, nums)])
    yield features.index_select(0, j), labels.index_select(0, j)
    # yield returns the batches lazily, making this function a generator
    # index_select(dim, indices) selects the entries at indices along dimension dim

# Build the linear regression model
def linearReg(w, b, X):
  return X.mm(w) + b

def square_loss(y_, y):
  return (y_ - y) ** 2 / 2.0

def SGD(params, lr, batch_size):
  for param in params:
    param.data -= lr * param.grad / batch_size
    # Updating through .data does not affect the gradients tracked for backpropagation

# w, b initialization
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float32, requires_grad=True)
b = torch.ones(1, dtype=torch.float32, requires_grad=True)

# Gradient descent loop
lr = 0.03
epochs = 3
net = linearReg
loss = square_loss
for epo in range(1, epochs + 1):
  for X, y_ in batch_iter(batch_size, features, labels):
    y = net(w, b, X)
    l = loss(y_, y).sum()  # sum to a scalar so backward() can be called
    l.backward()  # backpropagate
    SGD([w, b], lr, batch_size)  # gradient descent step
    # Zero the gradients for the next batch
    w.grad.data.zero_()
    b.grad.data.zero_()
  train_l = loss(net(w, b, features), labels)
  print('epoch: %d, loss: %f' % (epo, train_l.mean().item()))

# Inspect the fitted result
print('w: [%f, %f], b: %f' % (w[0].item(), w[1].item(), b.item()))
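
The fitted values should land close to the ground truth $w = [2, -3.4]$ and $b = 4.2$. As a minimal sanity check (this snippet is an addition, not from the original post), the errors can be printed directly:

# Compare the fitted parameters against the ground truth
with torch.no_grad():
  print('w error:', [true_w[i] - w[i].item() for i in range(num_inputs)])
  print('b error:', true_b - b.item())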

Method 2: Ride first, ask questions later (just call the libraries)

import torch
import torch.nn as nn
import numpy as np  # needed for the noise below
import torch.utils.data as Data
import torch.optim as optim
from torch.nn import init

true_w = [2.0, -3.4]
true_b = 4.2

# Construct the same kind of data
num_inputs = 2
num_examples = 1000
features = torch.randn(num_examples, num_inputs, dtype=torch.float32)
labels = features.mm(torch.tensor(true_w).view(2, 1)) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float32)

# Use the built-in utilities to create batched data
batch_size = 10
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
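
# Optional sanity check (an addition, not in the original): fetch one batch
# to confirm the loader yields (batch_size, 2) features and (batch_size, 1) labels.
X_sample, y_sample = next(iter(data_iter))
print(X_sample.size(), y_sample.size())  # torch.Size([10, 2]) torch.Size([10, 1])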

# Build the linear regression model
class linearNet(nn.Module):
  def __init__(self, n_features):
    super(linearNet, self).__init__()
    self.linear = nn.Linear(n_features, 1)

  def forward(self, x):
    return self.linear(x)

net = linearNet(num_inputs)

# w, b initialization
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)

loss = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.03)

# Gradient descent loop
epochs = 3
for epo in range(1, epochs + 1):
  for X, y_ in data_iter:
    y = net(X)
    l = loss(y, y_.view(-1, 1))
    optimizer.zero_grad()  # zero the gradients
    l.backward()  # backpropagate
    optimizer.step()  # gradient descent step
  print('epoch: %d, loss: %f' % (epo, l.item()))
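
As in Method 1, the fitted parameters can be inspected at the end (a small addition mirroring Method 1's final print); since linearNet exposes the layer as net.linear, its weight and bias hold the learned $w$ and $b$:

# Inspect the fitted parameters; they should be close to true_w and true_b
print('w:', net.linear.weight.data)
print('b:', net.linear.bias.data)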