Model Selection, Overfitting and Underfitting
Training Error and Generalization Error
Before explaining these phenomena, we need to distinguish between training error and generalization error. Informally, the former is the error a model exhibits on the training dataset, while the latter is the expected error of the model on any test data sample, and it is commonly approximated by the error on a test dataset.
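In symbols: writing $f$ for the learned model and $\ell$ for the loss function, the generalization error is an expectation over the (unknown) data distribution, which we estimate by the average loss over $m$ held-out test samples:

$$\mathbb{E}_{(x,y)}\big[\ell(f(x), y)\big] \;\approx\; \frac{1}{m}\sum_{i=1}^{m} \ell\big(f(x_i), y_i\big).$$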
Polynomial Function Fitting Experiment
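The experiment below generates an artificial dataset from a cubic polynomial; the coefficients correspond to true_w and true_b in the code that follows:

$$y = 1.2x - 3.4x^2 + 5.6x^3 + 5 + \epsilon, \qquad \epsilon \sim \mathcal{N}(0,\, 0.01^2).$$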
%matplotlib inline
import torch
import numpy as np
import sys
sys.path.append("/home/kesci/input")
import d2lzh1981 as d2l
print(torch.__version__)
# Initialize model parameters
n_train, n_test, true_w, true_b = 100, 100, [1.2, -3.4, 5.6], 5
features = torch.randn((n_train + n_test, 1))
poly_features = torch.cat((features, torch.pow(features, 2), torch.pow(features, 3)), 1)  # torch.cat concatenates tensors
labels = (true_w[0] * poly_features[:, 0] + true_w[1] * poly_features[:, 1]
          + true_w[2] * poly_features[:, 2] + true_b)
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
# Define, train, and test the model
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,
             legend=None, figsize=(3.5, 2.5)):
    # d2l.set_figsize(figsize)
    d2l.plt.xlabel(x_label)
    d2l.plt.ylabel(y_label)
    d2l.plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')  # draws a line plot with a logarithmic y-axis
        d2l.plt.legend(legend)
num_epochs, loss = 100, torch.nn.MSELoss()
def fit_and_plot(train_features, test_features, train_labels, test_labels):
    # Initialize the network model
    net = torch.nn.Linear(train_features.shape[-1], 1)
    # As the Linear documentation notes, PyTorch has already initialized the parameters,
    # so we do not initialize them manually here
    # Set the batch size
    batch_size = min(10, train_labels.shape[0])
    dataset = torch.utils.data.TensorDataset(train_features, train_labels)  # build the dataset
    train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)  # set up data loading
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)  # set the optimizer (stochastic gradient descent)
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:  # fetch one mini-batch
            l = loss(net(X), y.view(-1, 1))  # feed into the network, compare with labels to obtain the loss
            optimizer.zero_grad()  # clear gradients to prevent accumulation from interfering with optimization
            l.backward()  # compute gradients
            optimizer.step()  # update the parameters
        train_labels = train_labels.view(-1, 1)
        test_labels = test_labels.view(-1, 1)
        train_ls.append(loss(net(train_features), train_labels).item())  # record the training loss in train_ls
        test_ls.append(loss(net(test_features), test_labels).item())  # record the test loss in test_ls
    print('final epoch: train loss', train_ls[-1], 'test loss', test_ls[-1])
    semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
             range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('weight:', net.weight.data,
          '\nbias:', net.bias.data)
# Third-order polynomial fitting (normal)
fit_and_plot(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:])
# Linear function fitting (underfitting)
fit_and_plot(features[:n_train, :], features[n_train:, :], labels[:n_train], labels[n_train:])
# Insufficient training samples (overfitting)
fit_and_plot(poly_features[0:2, :], poly_features[n_train:, :], labels[0:2], labels[n_train:])
Weight Decay
Method
Weight decay is equivalent to L2 norm regularization. Regularization adds a penalty term to the model's loss function so that the learned parameter values stay small; it is a common way to cope with overfitting.
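Concretely, L2 regularization replaces the original loss $\ell(w, b)$ with the penalized loss

$$\tilde{\ell}(w, b) = \ell(w, b) + \frac{\lambda}{2}\lVert w \rVert^2,$$

whose gradient with respect to $w$ gains an extra term $\lambda w$. A gradient descent step with learning rate $\eta$ therefore becomes

$$w \leftarrow (1 - \eta\lambda)\, w - \eta\, \nabla_w \ell(w, b),$$

i.e. the weights are first shrunk ("decayed") by the factor $1 - \eta\lambda$ before the usual gradient update, which is where the name weight decay comes from.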
High-Dimensional Linear Regression: Implementation from Scratch
Below, we use high-dimensional linear regression as an example to introduce an overfitting problem, and apply weight decay to cope with it. Let the dimension of the data sample features be p. For any sample in the training and test datasets with features $x_1, x_2, \ldots, x_p$, we use the following linear function to generate its label:

$$y = 0.05 + \sum_{i=1}^{p} 0.01\, x_i + \epsilon,$$

where the noise term $\epsilon$ follows a normal distribution with mean 0 and standard deviation 0.01. To make overfitting easy to observe, we consider a high-dimensional regression problem, e.g. with dimension p = 200, and we deliberately keep the training set small, e.g. 20 samples.
%matplotlib inline
import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.append("/home/kesci/input")
import d2lzh1981 as d2l
print(torch.__version__)
# Initialize model parameters
n_train, n_test, num_inputs = 20, 100, 200
true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05
features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]
# Define a function that initializes the model parameters and attaches gradients
def init_params():
    w = torch.randn((num_inputs, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    return [w, b]
# Define the L2 norm penalty term
def l2_penalty(w):
    return (w**2).sum() / 2
# Define training and testing
batch_size, num_epochs, lr = 1, 100, 0.003
net, loss = d2l.linreg, d2l.squared_loss
dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
def fit_and_plot(lambd):
    w, b = init_params()
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            # add the L2 norm penalty term
            l = loss(net(X, w, b), y) + lambd * l2_penalty(w)
            l = l.sum()
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()
            l.backward()
            d2l.sgd([w, b], lr, batch_size)
        train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())
        test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())
    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
                 range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('L2 norm of w:', w.norm().item())
# Observe overfitting
fit_and_plot(lambd=0)
# Use weight decay
fit_and_plot(lambd=3)
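Comparing the two runs, with lambd=3 the gap between training and test loss should narrow noticeably relative to lambd=0, and the printed L2 norm of w should be much smaller, since the penalty pulls the weights toward zero.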
Concise Implementation
def fit_and_plot_pytorch(wd):
    # Weight parameter names generally end with "weight"
    net = nn.Linear(num_inputs, 1)
    nn.init.normal_(net.weight, mean=0, std=1)
    nn.init.normal_(net.bias, mean=0, std=1)
    optimizer_w = torch.optim.SGD(params=[net.weight], lr=lr, weight_decay=wd)  # apply weight decay to the weights
    optimizer_b = torch.optim.SGD(params=[net.bias], lr=lr)  # no weight decay for the bias
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            l = loss(net(X), y).mean()
            optimizer_w.zero_grad()
            optimizer_b.zero_grad()
            l.backward()
            # call step on each optimizer instance to update the weights and bias separately
            optimizer_w.step()
            optimizer_b.step()
        train_ls.append(loss(net(train_features), train_labels).mean().item())
        test_ls.append(loss(net(test_features), test_labels).mean().item())
    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
                 range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('L2 norm of w:', net.weight.data.norm().item())
fit_and_plot_pytorch(0)
fit_and_plot_pytorch(3)
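As an aside, the two separate optimizers are not strictly necessary: torch.optim.SGD accepts parameter groups with per-group options, so a single optimizer can apply weight decay to the weights only. A minimal sketch, using the same names as above:

optimizer = torch.optim.SGD(
    [{'params': [net.weight], 'weight_decay': wd},  # decay the weights only
     {'params': [net.bias]}],                       # bias group keeps the default weight_decay=0
    lr=lr)

The training loop then needs only a single optimizer.zero_grad() and optimizer.step() per batch.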
Dropout
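In (inverted) dropout, each hidden unit $h_i$ is zeroed out with probability $p$ during training, and the surviving units are scaled up by $1/(1-p)$:

$$h_i' = \frac{\xi_i}{1 - p}\, h_i, \qquad \xi_i \sim \mathrm{Bernoulli}(1 - p),$$

so that $\mathbb{E}[h_i'] = h_i$ and the expected activation is unchanged. The dropout function below implements exactly this scheme.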
%matplotlib inline
import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.append("/home/kesci/input")
import d2lzh1981 as d2l
print(torch.__version__)
def dropout(X, drop_prob):
    X = X.float()
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    # in this case, drop all elements
    if keep_prob == 0:
        return torch.zeros_like(X)
    mask = (torch.rand(X.shape) < keep_prob).float()
    return mask * X / keep_prob
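A quick sanity check on a small tensor (the middle output is random: at drop_prob=0.5, roughly half the entries are zeroed and the survivors are doubled):

X = torch.arange(16).view(2, 8)
print(dropout(X, 0))    # unchanged
print(dropout(X, 0.5))  # each entry kept with probability 0.5 and scaled by 2
print(dropout(X, 1.0))  # all zeros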
# Parameter initialization
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
W1 = torch.tensor(np.random.normal(0, 0.01, size=(num_inputs, num_hiddens1)), dtype=torch.float, requires_grad=True)
b1 = torch.zeros(num_hiddens1, requires_grad=True)
W2 = torch.tensor(np.random.normal(0, 0.01, size=(num_hiddens1, num_hiddens2)), dtype=torch.float, requires_grad=True)
b2 = torch.zeros(num_hiddens2, requires_grad=True)
W3 = torch.tensor(np.random.normal(0, 0.01, size=(num_hiddens2, num_outputs)), dtype=torch.float, requires_grad=True)
b3 = torch.zeros(num_outputs, requires_grad=True)
params = [W1, b1, W2, b2, W3, b3]  # collected here because train_ch3 below takes the parameter list
drop_prob1, drop_prob2 = 0.2, 0.5
def net(X, is_training=True):
    X = X.view(-1, num_inputs)
    H1 = (torch.matmul(X, W1) + b1).relu()
    if is_training:  # apply dropout only when training the model
        H1 = dropout(H1, drop_prob1)  # add a dropout layer after the first fully connected layer
    H2 = (torch.matmul(H1, W2) + b2).relu()
    if is_training:
        H2 = dropout(H2, drop_prob2)  # add a dropout layer after the second fully connected layer
    return torch.matmul(H2, W3) + b3
def evaluate_accuracy(data_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in data_iter:
        if isinstance(net, torch.nn.Module):
            net.eval()  # evaluation mode; this turns off dropout
            acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
            net.train()  # switch back to training mode
        else:  # a custom model
            if 'is_training' in net.__code__.co_varnames:  # if it has an is_training parameter
                # set is_training to False
                acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
            else:
                acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
num_epochs, lr, batch_size = 5, 100.0, 256  # the learning rate is set very large here, for the same reason as before
loss = torch.nn.CrossEntropyLoss()
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, root='/home/kesci/input/FashionMNIST2065')
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)
Concise Implementation
net = nn.Sequential(
d2l.FlattenLayer(),
nn.Linear(num_inputs, num_hiddens1),
nn.ReLU(),
nn.Dropout(drop_prob1),
nn.Linear(num_hiddens1, num_hiddens2),
nn.ReLU(),
nn.Dropout(drop_prob2),
nn.Linear(num_hiddens2, 10)
)
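Unlike the scratch version, nn.Dropout needs no is_training argument: it reads the module's training flag, so net.eval() disables dropout (which is what evaluate_accuracy above relies on) and net.train() re-enables it.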
for param in net.parameters():
    nn.init.normal_(param, mean=0, std=0.01)
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)