Table of Contents
1. autograd: The Automatic Differentiation System
1.1 torch.autograd.backward()
- Purpose: automatically compute gradients.
- Signature: torch.autograd.backward(tensors, grad_tensors=None, retain_graph=None, create_graph=False)
- Parameters:
  - tensors: the tensors to differentiate, e.g. loss.
  - grad_tensors: per-output gradient weights; only needed when tensors is not a scalar and a gradient is required.
  - retain_graph: if False, the graph used to compute the gradients is freed after the call. Note that in nearly all cases setting this to True is unnecessary and can usually be worked around in a more efficient way. Defaults to the value of create_graph.
  - create_graph: if True, the graph of the derivative is constructed, which allows computing higher-order derivatives. Defaults to False.
A simple example:
import torch

w = torch.tensor([1.0], requires_grad=True)  # requires_grad=True marks this tensor as needing a gradient
x = torch.tensor([2.0], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)
y.backward()   # internally this calls torch.autograd.backward()
print(w.grad)  # tensor([5.])
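The printed value tensor([5.]) can be checked by hand. With a = w + x and b = w + 1 the graph computes y = a * b, so, applying the chain rule over the two branches with w = 1 and x = 2:

\[
\frac{\partial y}{\partial w}
= \frac{\partial y}{\partial a}\frac{\partial a}{\partial w} + \frac{\partial y}{\partial b}\frac{\partial b}{\partial w}
= b \cdot 1 + a \cdot 1
= (w+1) + (x+w) = 2 + 3 = 5
\]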
The effect of retain_graph:
# Same code as above, except backward() is called twice in a row
y.backward()
y.backward()  # error: by default the graph is freed once backward() finishes
# Fix: retain the graph on the first call
# y.backward(retain_graph=True)   # dy/dw = 5
# y.backward()                    # dy/dw = 10, because gradients accumulate
#
# Accumulating two backward passes like this is equivalent to backpropagating
# loss = 1 * y + 1 * y, which is exactly the mechanism made explicit in the
# grad_tensors example below:
# loss = torch.cat([y0, y1], dim=0)
# grad_tensors = torch.tensor([1., 1.])
# loss.backward(gradient=grad_tensors)
# A runnable version of the accumulation is sketched next.
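A minimal runnable sketch of the accumulation, reusing the same w, x, a, b, y as in the first example:

y.backward(retain_graph=True)
print(w.grad)   # tensor([5.])
y.backward()    # allowed, because the graph was retained on the first call
print(w.grad)   # tensor([10.]), the second pass is added on top of the first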
The effect of grad_tensors:
When differentiating by hand we can take the derivative of a tensor with respect to a tensor, but supporting that directly would make the program extremely complex, so PyTorch only differentiates a scalar with respect to tensors; grad_tensors supplies the weights that reduce a non-scalar output to such a scalar.
import torch

w = torch.tensor([1.0], requires_grad=True)
x = torch.tensor([2.0], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
y0 = torch.mul(a, b)  # y0 = (x+w) * (w+1), dy0/dw = 5 as computed above
y1 = torch.add(a, b)  # y1 = (x+w) + (w+1), dy1/dw = 2
loss = torch.cat([y0, y1], dim=0)
grad_tensors = torch.tensor([1., 2.])
# What is really backpropagated is not loss itself but the inner product of loss and grad_tensors
loss.backward(gradient=grad_tensors)  # i.e. backpropagating loss = 1 * y0 + 2 * y1
print(w.grad)  # tensor([9.]), since dw = 1 * dy0/dw + 2 * dy1/dw
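For completeness: had loss.backward() been called here without the gradient argument, PyTorch could not reduce the two-element loss to a scalar and would raise an error along these lines (exact wording may vary by version):

loss.backward()  # hypothetical call without the gradient argument
# RuntimeError: grad can be implicitly created only for scalar outputs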
1.2 torch.autograd.grad()
- Purpose: compute and return gradients.
- Signature: torch.autograd.grad(outputs, inputs, grad_outputs=None, retain_graph=None, create_graph=False, only_inputs=True, allow_unused=False)
- Parameters:
  - outputs: the tensors to differentiate, e.g. loss.
  - inputs: the tensors whose gradients are wanted, e.g. w, x.
  - grad_outputs: per-output gradient weights (same role as grad_tensors above).
  - retain_graph: keep the computation graph after the call.
  - create_graph: build the graph of the derivative itself, which is needed for higher-order differentiation.
A simple example:
import torch
x = torch.tensor([3.], requires_grad=True)
y = torch.pow(x, 2) # y = x**2
grad_1 = torch.autograd.grad(y, x, create_graph=True) # grad_1 = dy/dx = 2x = 2 * 3 = 6
print(grad_1) # (tensor([6.], grad_fn=<MulBackward0>),)
grad_2 = torch.autograd.grad(grad_1[0], x) # grad_2 = d(dy/dx)/dx = 2
print(grad_2) # (tensor([2.]),)
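The grad_outputs parameter (not used above) plays the same role as grad_tensors in backward(); a minimal sketch, assuming a simple element-wise function:

import torch

x = torch.tensor([1., 2.], requires_grad=True)
y = x * 2                          # non-scalar output, y = [2., 4.]
weights = torch.tensor([1., 3.])   # weights for the two output elements
g = torch.autograd.grad(y, x, grad_outputs=weights)
print(g)  # (tensor([2., 6.]),)  i.e. d(1*y[0] + 3*y[1]) / dx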
1.3 Tips for autograd
- Gradients are not zeroed automatically

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
for i in range(4):
    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)
    y.backward()
    print(w.grad)

# ======== Output ==========
# tensor([5.])
# tensor([10.])
# tensor([15.])
# tensor([20.])
The reason is similar to the retain_graph=True case above: gradients keep accumulating across backward passes. To get the correct gradient on every iteration, zero the gradient after each pass:
import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
for i in range(4):
    a = torch.add(w, x)
    b = torch.add(w, 1)
    y = torch.mul(a, b)
    y.backward()
    print(w.grad)
    w.grad.zero_()  # methods ending with an underscore operate in place

# ======== Output ==========
# tensor([5.])
# tensor([5.])
# tensor([5.])
# tensor([5.])
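In a real training loop the same bookkeeping is usually done through the optimizer, as in the logistic regression example in section 3. A minimal sketch; the optimizer is built directly from the two leaf tensors here purely for illustration:

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
optimizer = torch.optim.SGD([w, x], lr=0.1)
for i in range(4):
    y = torch.mul(torch.add(w, x), torch.add(w, 1))
    y.backward()
    print(w.grad)          # tensor([5.]) on every iteration
    optimizer.zero_grad()  # zeroes .grad of every registered parameter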
- Nodes that depend on leaf nodes have requires_grad set to True by default

Simply put, if a tensor lower in the graph has requires_grad=True, then any tensor produced by operating on it also defaults to requires_grad=True:

import torch

w = torch.tensor([1.], requires_grad=True)
x = torch.tensor([2.], requires_grad=True)
a = torch.add(w, x)
b = torch.add(w, 1)
y = torch.mul(a, b)
print(a.requires_grad, b.requires_grad, y.requires_grad)  # True True True
- Leaf nodes cannot be modified by in-place operations

PyTorch raises an error if a leaf tensor that requires gradients is changed in place, as the sketch below shows.
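A minimal sketch of what happens (the exact error wording may differ between PyTorch versions):

import torch

w = torch.tensor([1.], requires_grad=True)
w.add_(1.)  # in-place addition on a leaf tensor
# RuntimeError: a leaf Variable that requires grad is being used in an in-place operation.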
2. Machine Learning Model Training Steps
A typical training pipeline has five steps, which the example in the next section follows: prepare the data, choose the model, choose the loss function, choose the optimizer, and run the training loop.
3. A Simple Implementation of Logistic Regression
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
torch.manual_seed(10)
# ============================ step 1/5 generate data ============================
sample_nums = 100
mean_value = 1.7
bias = 1
n_data = torch.ones(sample_nums, 2)
x0 = torch.normal(mean_value * n_data, 1) + bias   # class 0 samples, shape=(100, 2)
y0 = torch.zeros(sample_nums)                      # class 0 labels,  shape=(100,)
x1 = torch.normal(-mean_value * n_data, 1) + bias  # class 1 samples, shape=(100, 2)
y1 = torch.ones(sample_nums)                       # class 1 labels,  shape=(100,)
train_x = torch.cat((x0, x1), 0)
train_y = torch.cat((y0, y1), 0)
# ============================ step 2/5 choose the model ============================
class LR(nn.Module):
    def __init__(self):
        super(LR, self).__init__()
        self.features = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.features(x)
        x = self.sigmoid(x)
        return x

lr_net = LR()  # instantiate the logistic regression model
# ============================ step 3/5 choose the loss function ============================
loss_fn = nn.BCELoss()

# ============================ step 4/5 choose the optimizer ============================
lr = 0.01  # learning rate
optimizer = torch.optim.SGD(lr_net.parameters(), lr=lr, momentum=0.9)

# ============================ step 5/5 train the model ============================
for iteration in range(1000):
    # forward pass
    y_pred = lr_net(train_x)

    # compute the loss
    loss = loss_fn(y_pred.squeeze(), train_y)

    # backward pass
    loss.backward()

    # update the parameters
    optimizer.step()

    # zero the gradients
    optimizer.zero_grad()

    # plotting
    if iteration % 20 == 0:
        mask = y_pred.ge(0.5).float().squeeze()  # classify with a threshold of 0.5
        correct = (mask == train_y).sum()        # number of correctly predicted samples
        acc = correct.item() / train_y.size(0)   # classification accuracy

        plt.scatter(x0.data.numpy()[:, 0], x0.data.numpy()[:, 1], c='r', label='class 0')
        plt.scatter(x1.data.numpy()[:, 0], x1.data.numpy()[:, 1], c='b', label='class 1')

        w0, w1 = lr_net.features.weight[0]
        w0, w1 = float(w0.item()), float(w1.item())
        plot_b = float(lr_net.features.bias[0].item())
        plot_x = np.arange(-6, 6, 0.1)
        plot_y = (-w0 * plot_x - plot_b) / w1  # decision boundary: w0*x + w1*y + b = 0

        plt.xlim(-5, 7)
        plt.ylim(-7, 7)
        plt.plot(plot_x, plot_y)
        plt.text(-5, 5, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.title("Iteration: {}\nw0:{:.2f} w1:{:.2f} b: {:.2f} accuracy:{:.2%}".format(iteration, w0, w1, plot_b, acc))
        plt.legend()

        plt.show()
        plt.pause(0.5)

        if acc > 0.999:
            break