dropout最早出現在AlexNet中,它對CNN的發展起到了一定程度的推進作用,本章我們從0開始來實現一個dropout方法。
實現dropout:
def dropout(X, drop_probability):
    """Apply inverted dropout to ``X``.

    Each element is zeroed with probability ``drop_probability``; survivors
    are scaled by ``1 / keep_prob`` so that ``E[dropout(X)] == X`` (inverted
    dropout — no extra rescaling is needed at inference time).

    Parameters
    ----------
    X : NDArray
        Input activations.
    drop_probability : float
        Probability in [0, 1] of dropping each element.

    Returns
    -------
    NDArray of the same shape as ``X``.

    Raises
    ------
    ValueError
        If ``drop_probability`` is outside [0, 1] (previously such values
        were silently accepted and produced a wrongly scaled result).
    """
    if not 0 <= drop_probability <= 1:
        raise ValueError(
            "drop_probability must be in [0, 1], got %r" % (drop_probability,))
    keep_prob = 1 - drop_probability  # probability of keeping an element
    if keep_prob == 0:
        # Everything is dropped: return an all-zero array of the same shape.
        return X.zeros_like()
    # Bernoulli mask: 1 where the uniform draw is below keep_prob, else 0.
    mask = nd.random.uniform(low=0, high=1, shape=X.shape) < keep_prob
    # Scale survivors so the expected value of the output equals X.
    scale = 1 / keep_prob
    return mask * X * scale
運行一個實例看看:
1)概率爲0:
# 運行一個實例看看
A=nd.arange(20).reshape((5,4))
print(A)
print("==========")
print(dropout(A,0)) # 當概率爲0,就是不使用dropout時
結果:
2)概率爲0.5:
print(dropout(A,0.5)) # 當概率爲0.5,就是有50%的元素會被置爲0
結果:
有一半的可能性會變成0,剩下的值全部乘以2。
3)當概率爲1.0時:
print(dropout(A,1.0))
全部元素均變爲0。
加入dropout的MLP
下面我們實現一個MLP,不過這個MLP引入了dropout方法,具體的變化主要在全連接層中加入了dropout:
drop_out1 = 0.2 # dropout rate after hidden layer 1 — dropout is used during training only
drop_out2 = 0.5 # dropout rate after hidden layer 2
def net(x,dropout1,dropout2):
    """Forward pass of a 784-256-128-10 MLP with dropout after each hidden layer.

    ``dropout1``/``dropout2`` are the drop probabilities for the two hidden
    layers; pass 0 for both at evaluation/prediction time.
    Returns raw scores (no softmax — the loss applies it).
    """
    # Flatten each image into a (batch, num_input) row vector.
    x=x.reshape(-1,num_input)
    # First fully connected layer: affine -> ReLU -> dropout.
    h1=relu(nd.dot(x,w1)+b1)
    h1=dropout(h1,dropout1) # apply dropout to the first hidden layer
    # Second fully connected layer.
    # NOTE(review): no ReLU on this layer — possibly an oversight; confirm intended.
    h2=nd.dot(h1,w2)+b2
    h2=dropout(h2,dropout2)
    # Output layer: the last layer usually gets no activation.
    output=nd.dot(h2,w3)+b3
    return output
剩下的代碼基本上與MXNET深度學習框架-07-從0開始實現多層感知機(MLP)一致。下面放上所有代碼(dropout只在模型訓練時使用!!!):
import mxnet.gluon as gn
import mxnet.autograd as ag
import mxnet.ndarray as nd
def dropout(X, drop_probability):
    """Apply inverted dropout to ``X``.

    Each element is zeroed with probability ``drop_probability``; survivors
    are scaled by ``1 / keep_prob`` so that ``E[dropout(X)] == X`` (inverted
    dropout — no extra rescaling is needed at inference time).

    Parameters
    ----------
    X : NDArray
        Input activations.
    drop_probability : float
        Probability in [0, 1] of dropping each element.

    Returns
    -------
    NDArray of the same shape as ``X``.

    Raises
    ------
    ValueError
        If ``drop_probability`` is outside [0, 1] (previously such values
        were silently accepted and produced a wrongly scaled result).
    """
    if not 0 <= drop_probability <= 1:
        raise ValueError(
            "drop_probability must be in [0, 1], got %r" % (drop_probability,))
    keep_prob = 1 - drop_probability  # probability of keeping an element
    if keep_prob == 0:
        # Everything is dropped: return an all-zero array of the same shape.
        return X.zeros_like()
    # Bernoulli mask: 1 where the uniform draw is below keep_prob, else 0.
    mask = nd.random.uniform(low=0, high=1, shape=X.shape) < keep_prob
    # Scale survivors so the expected value of the output equals X.
    scale = 1 / keep_prob
    return mask * X * scale
#
# # 運行一個實例看看
# A=nd.arange(20).reshape((5,4))
# print(A)
# print("==========")
# # print(dropout(A,0)) # 當概率爲0,就是不使用dropout時
#
# # print(dropout(A,0.5))
# print(dropout(A,1.0))
'''---模型訓練實例(引入dropout)---'''
# 下面的代碼完全引入<從0開始的MLP>代碼,只是多加了一層隱藏層
def transform(data, label):
    """Normalize image pixels from [0, 255] to [0, 1] float32 and cast labels to float32."""
    normalized = data.astype("float32") / 255
    return normalized, label.astype("float32")
# Load the FashionMNIST dataset (train and test splits; downloaded on first use).
mnist_train = gn.data.vision.FashionMNIST(train=True)
mnist_test = gn.data.vision.FashionMNIST(train=False)
# Peek at the first 9 samples to inspect the data layout.
data, label = mnist_train[0:9]
print(data.shape, label)  # check the data dimensions
import matplotlib.pyplot as plt
def show_image(image):
    """Display a batch of 28x28 images side by side in one matplotlib row."""
    count = image.shape[0]
    _, axes = plt.subplots(1, count, figsize=(15, 15))
    for idx in range(count):
        # Each sample is reshaped to 28x28 and converted to numpy for imshow.
        axes[idx].imshow(image[idx].reshape((28, 28)).asnumpy())
    plt.show()
def get_fashion_mnist_labels(labels):
    """Map numeric FashionMNIST class ids (0-9) to their clothing-name strings."""
    names = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
             'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    # Ids may arrive as floats (e.g. float32 labels), so coerce to int first.
    return [names[int(idx)] for idx in labels]
#
# show_image(data)
# print(get_fashion_mnist_labels(label))
'''----數據讀取----'''
batch_size = 100  # mini-batch size for both loaders
# NOTE(review): `transformer` is created but never applied in the visible code;
# normalization is done by `transform` instead — confirm this is intentional.
transformer = gn.data.vision.transforms.ToTensor()
# Shuffle the training set each epoch; keep test order fixed for evaluation.
train_data = gn.data.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True)
test_data = gn.data.DataLoader(dataset=mnist_test, batch_size=batch_size, shuffle=False)
'''----初始化模型參數----'''
# Network dimensions: 784 -> 256 -> 128 -> 10.
num_input = 28 * 28 * 1
num_output = 10
num_hidden1=256 # number of neurons in the first hidden layer
num_hidden2=128 # number of neurons in the second hidden layer
# Weights drawn from a normal distribution with scale 0.01; biases start at zero.
w1 = nd.random_normal(shape=(num_input, num_hidden1),scale=0.01)
b1 = nd.zeros(shape=(num_hidden1))
w2 = nd.random_normal(shape=(num_hidden1, num_hidden2),scale=0.01)
b2 = nd.zeros(shape=(num_hidden2))
w3 = nd.random_normal(shape=(num_hidden2, num_output),scale=0.01)
b3 = nd.zeros(shape=(num_output))
params = [w1, b1,w2,b2,w3,b3]
for param in params:
    param.attach_grad()  # allocate gradient buffers so autograd can write into them
# ReLU activation used by the hidden layers.
def relu(x):
    """Element-wise rectified linear unit: max(x, 0)."""
    return nd.maximum(x, 0)
'''----定義模型----'''
# The model chains fully connected layers with ReLU, adding dropout in between.
drop_out1 = 0.2 # dropout rate after hidden layer 1 — used during training only
drop_out2 = 0.5 # dropout rate after hidden layer 2
def net(x, dropout1, dropout2):
    """Forward pass of the 784-256-128-10 MLP with dropout on both hidden layers.

    ``dropout1``/``dropout2`` are the drop probabilities; pass 0 for both at
    evaluation/prediction time. Returns raw scores — softmax is applied by
    the loss function, not here.
    """
    # Flatten each image into a (batch, num_input) row vector.
    flat = x.reshape(-1, num_input)
    # Hidden layer 1: affine -> ReLU -> dropout.
    hidden1 = dropout(relu(nd.dot(flat, w1) + b1), dropout1)
    # Hidden layer 2: affine -> dropout.
    # NOTE(review): no ReLU on this layer — possibly an oversight; confirm intended.
    hidden2 = dropout(nd.dot(hidden1, w2) + b2, dropout2)
    # Output layer: the last layer gets no activation.
    return nd.dot(hidden2, w3) + b3
# Softmax and cross-entropy loss, fused in one Gluon op.
# Computing them separately is numerically unstable (compare the results in the
# two previous posts), so we use the API Gluon provides directly.
cross_loss=gn.loss.SoftmaxCrossEntropyLoss()
# Classification accuracy of one batch of scores against the true labels.
def accuracy(output,label):
    """Return the fraction of rows whose arg-max prediction equals the label."""
    predictions = output.argmax(axis=1)
    return nd.mean(predictions == label).asscalar()
def evaluate_accuracy(data_iter,net):
    """Average per-batch `accuracy` of `net` over `data_iter` (dropout disabled)."""
    total = 0.0
    for batch_data, batch_label in data_iter:
        batch_data, batch_label = transform(batch_data, batch_label)
        # Dropout must be switched off (probability 0) at evaluation time.
        scores = net(batch_data, dropout1=0, dropout2=0)
        total += accuracy(scores, batch_label)
    return total / len(data_iter)
# Plain mini-batch stochastic gradient descent optimizer.
def SGD(params,lr):
    """Update every parameter in place: param <- param - lr * param.grad."""
    for param in params:
        # Step a fixed distance along the negative gradient direction.
        param[:] = param - lr * param.grad
# Training loop.
lr=0.1      # learning rate
epochs=20   # number of passes over the training set
for epoch in range(epochs):
    train_loss=0
    train_acc=0
    for image,y in train_data:
        image,y=transform(image,y) # type conversion and normalization
        with ag.record():  # record the forward pass so gradients can be computed
            output=net(image,dropout1=drop_out1,dropout2=drop_out2)
            loss=cross_loss(output,y)
        loss.backward()
        # Divide lr by batch_size to average the (summed) batch gradients,
        # so the learning rate is not as sensitive to batch_size.
        SGD(params,lr/batch_size)
        train_loss+=nd.mean(loss).asscalar()
        train_acc+=accuracy(output,y)
    test_acc=evaluate_accuracy(test_data,net)
    print("Epoch %d, Loss:%f, Train acc:%f, Test acc:%f"
    %(epoch,train_loss/len(train_data),train_acc/len(train_data),test_acc))
'''----預測-------'''
# After training, run the model on a few held-out samples.
image_10,label_10=mnist_test[:10] # take the first 10 test samples
show_image(image_10)
print("真實樣本標籤:",label_10)
print("真實數字標籤對應的服飾名:",get_fashion_mnist_labels(label_10))
image_10,label_10=transform(image_10,label_10)
# Dropout is disabled (probability 0) at prediction time.
predict_label=net(image_10,dropout1=0,dropout2=0).argmax(axis=1)
print("預測樣本標籤:",predict_label.astype("int8"))
print("預測數字標籤對應的服飾名:",get_fashion_mnist_labels(predict_label.asnumpy()))
訓練過程: