PyTorch: Regularization with Dropout

1.1 The Dropout Concept

Dropout: random deactivation. "Random" refers to the dropout probability, and "deactivation" means the neuron's weights are set to 0 (its output is dropped).

The following illustration helps in understanding random deactivation:
[Figure: a standard fully connected network (left) and the same network with dropout applied (right)]
The figure on the left is a normal fully connected network; the figure on the right is the same network with dropout. Dropout deactivates a portion of the neurons with a given probability, which forces each neuron to learn more robust features and reduces excessive dependence on particular inputs. This alleviates overfitting and lowers variance, achieving a regularization effect. It also makes the model more diverse: because neurons are deactivated at random on every forward pass, each training step effectively trains a different sub-network.
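A quick way to see this per-pass randomness is the minimal sketch below (the tensor shape and p=0.5 are arbitrary choices): feeding the same input through an nn.Dropout module twice in training mode samples a fresh mask each time, so a different subset of elements is zeroed.

import torch
import torch.nn as nn

torch.manual_seed(0)

drop = nn.Dropout(p=0.5)   # each element is zeroed with probability 0.5
x = torch.ones(2, 8)

drop.train()               # training mode: dropout is active
print(drop(x))             # a random subset of elements is zeroed
print(drop(x))             # a different random subset on the next pass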

Why does dropout achieve such a good regularization effect?

  1. From the perspective of feature dependency
    Suppose a neuron receives the output values of five neurons in the previous layer, which can be understood as five features from that layer, and that it depends especially heavily on one of those features. With dropout, the neuron cannot know which of the previous layer's neurons will be present on any given pass, so it cannot become overly dependent on any particular ones.

  2. From the perspective of data scale
    During training a neuron is dropped with probability drop_prob, so its expected output is scaled down by the factor 1 - drop_prob. At test time every neuron is active, so to keep the data scale consistent all weights are multiplied by 1 - drop_prob; for example, with drop_prob = 0.3 the weights are multiplied by 1 - drop_prob = 0.7. PyTorch actually implements the equivalent "inverted dropout": surviving activations are scaled by 1/(1 - drop_prob) during training, so no rescaling is needed at test time, as the sketch below shows.
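The following minimal sketch shows this scale handling in PyTorch (p=0.3 and the tensor shape are arbitrary choices): in training mode the surviving elements of an all-ones input come out as 1/(1-0.3) ≈ 1.4286, while in evaluation mode the module passes the input through unchanged.

import torch
import torch.nn as nn

torch.manual_seed(0)

drop = nn.Dropout(p=0.3)
x = torch.ones(1, 10)

drop.train()                 # training mode: inverted dropout is applied
print(drop(x))               # surviving elements are scaled to 1/(1-0.3) ≈ 1.4286

drop.eval()                  # evaluation mode: dropout is the identity
print(drop(x))               # all ones; no test-time rescaling is needed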

1.2 nn.Dropout

Function: Dropout layer.
Parameters:

  • p: the probability that an element is dropped (deactivation probability).
    Note: the Dropout layer is usually placed immediately before the layer whose inputs are to be dropped, as the sketch below illustrates.

torch.nn.Dropout(p=0.5, inplace=False)
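A minimal sketch of this placement convention (the layer sizes here are arbitrary): each nn.Dropout immediately precedes the nn.Linear whose inputs it randomly zeroes, which is the same pattern used in the MLP of the experiment below.

import torch.nn as nn

# Dropout immediately precedes the Linear layer whose inputs it drops
block = nn.Sequential(
    nn.Linear(64, 64),
    nn.ReLU(inplace=True),
    nn.Dropout(p=0.5),   # randomly zeroes the inputs of the next Linear
    nn.Linear(64, 10),
)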

The following code examines the effect of the Dropout layer:

import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter

torch.manual_seed(1)  # set the random seed for reproducibility
n_hidden = 200
max_iter = 2000
disp_interval = 400
lr_init = 0.01


# ============================ step 1/5 data ============================
def gen_data(num_data=10, x_range=(-1, 1)):
    """Generate a small 1-D linear regression dataset with Gaussian noise."""
    w = 1.5  # ground-truth slope
    train_x = torch.linspace(*x_range, num_data).unsqueeze_(1)
    train_y = w * train_x + torch.normal(0, 0.5, size=train_x.size())
    test_x = torch.linspace(*x_range, num_data).unsqueeze_(1)
    test_y = w * test_x + torch.normal(0, 0.3, size=test_x.size())

    return train_x, train_y, test_x, test_y


train_x, train_y, test_x, test_y = gen_data(x_range=(-1, 1))


# ============================ step 2/5 model ============================
class MLP(nn.Module):
    def __init__(self, neural_num, d_prob=0.5):
        super(MLP, self).__init__()
        self.linears = nn.Sequential(

            nn.Linear(1, neural_num),
            nn.ReLU(inplace=True),

            nn.Dropout(d_prob),
            nn.Linear(neural_num, neural_num),
            nn.ReLU(inplace=True),

            nn.Dropout(d_prob),
            nn.Linear(neural_num, neural_num),
            nn.ReLU(inplace=True),

            nn.Dropout(d_prob),
            nn.Linear(neural_num, 1),
        )

    def forward(self, x):
        return self.linears(x)


net_prob_0 = MLP(neural_num=n_hidden, d_prob=0.)    # baseline: no dropout
net_prob_05 = MLP(neural_num=n_hidden, d_prob=0.5)  # regularized: dropout p=0.5

# ============================ step 3/5 optimizers ============================
optim_normal = torch.optim.SGD(net_prob_0.parameters(), lr=lr_init, momentum=0.9)
optim_dropout = torch.optim.SGD(net_prob_05.parameters(), lr=lr_init, momentum=0.9)

# ============================ step 4/5 loss function ============================
loss_func = torch.nn.MSELoss()

# ============================ step 5/5 training loop ============================

writer = SummaryWriter(comment='_test_tensorboard', filename_suffix="12345678")
for epoch in range(max_iter):

    pred_normal, pred_dropout = net_prob_0(train_x), net_prob_05(train_x)
    loss_normal, loss_dropout = loss_func(pred_normal, train_y), loss_func(pred_dropout, train_y)

    optim_normal.zero_grad()
    optim_dropout.zero_grad()

    loss_normal.backward()
    loss_dropout.backward()

    optim_normal.step()
    optim_dropout.step()

    if (epoch+1) % disp_interval == 0:

        # switch to evaluation mode so dropout is disabled for testing and plotting
        net_prob_0.eval()
        net_prob_05.eval()

        # visualize parameter and gradient distributions in TensorBoard
        for name, layer in net_prob_0.named_parameters():
            writer.add_histogram(name + '_grad_normal', layer.grad, epoch)
            writer.add_histogram(name + '_data_normal', layer, epoch)

        for name, layer in net_prob_05.named_parameters():
            writer.add_histogram(name + '_grad_regularization', layer.grad, epoch)
            writer.add_histogram(name + '_data_regularization', layer, epoch)

        test_pred_prob_0, test_pred_prob_05 = net_prob_0(test_x), net_prob_05(test_x)

        # plot the fitted curves against the train/test data
        plt.scatter(train_x.data.numpy(), train_y.data.numpy(), c='blue', s=50, alpha=0.3, label='train')
        plt.scatter(test_x.data.numpy(), test_y.data.numpy(), c='red', s=50, alpha=0.3, label='test')
        plt.plot(test_x.data.numpy(), test_pred_prob_0.data.numpy(), 'r-', lw=3, label='d_prob_0')
        plt.plot(test_x.data.numpy(), test_pred_prob_05.data.numpy(), 'b--', lw=3, label='d_prob_05')
        plt.text(-0.25, -1.5, 'd_prob_0 loss={:.8f}'.format(loss_normal.item()), fontdict={'size': 15, 'color': 'red'})
        plt.text(-0.25, -2, 'd_prob_05 loss={:.8f}'.format(loss_dropout.item()), fontdict={'size': 15, 'color': 'red'})

        plt.ylim((-2.5, 2.5))
        plt.legend(loc='upper left')
        plt.title("Epoch: {}".format(epoch+1))
        plt.show()
        plt.close()

        # switch back to training mode so dropout is active again
        net_prob_0.train()
        net_prob_05.train()

The plotted output of the code is shown below:
[Figure: train/test scatter points with the fitted curves of the d_prob=0 and d_prob=0.5 networks at each logged epoch]
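The weight and gradient histograms logged via SummaryWriter can be viewed by running tensorboard --logdir=runs (runs is SummaryWriter's default output directory) and opening the printed URL in a browser.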
