Andrew Ng's Coursera Deep Learning course, deeplearning.ai (2-2): Optimization Algorithms -- Programming Assignment

Runnable source code: https://download.csdn.net/download/haoyutiangang/10495511

Initialization

Imports

import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets

from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *

%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

Helper functions

def load_params_and_grads(seed=1):
    np.random.seed(seed)
    W1 = np.random.randn(2,3)
    b1 = np.random.randn(2,1)
    W2 = np.random.randn(3,3)
    b2 = np.random.randn(3,1)

    dW1 = np.random.randn(2,3)
    db1 = np.random.randn(2,1)
    dW2 = np.random.randn(3,3)
    db2 = np.random.randn(3,1)

    return W1, b1, W2, b2, dW1, db1, dW2, db2


def initialize_parameters(layer_dims):
    """
    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer in our network

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    W1 -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    b1 -- bias vector of shape (layer_dims[l], 1)
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)

    Tips:
    - For example: the layer_dims for the "Planar Data classification model" would have been [2,2,1]. 
    This means W1's shape was (2,2), b1 was (2,1), W2 was (1,2) and b2 was (1,1). Now you have to generalize it!
    - In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
    """

    np.random.seed(3)
    parameters = {}
    L = len(layer_dims) # number of layers in the network

    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1])*  np.sqrt(2 / layer_dims[l-1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))

        assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])
        assert parameters['b' + str(l)].shape == (layer_dims[l], 1)

    return parameters
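
forward_propagation below calls relu and sigmoid, which opt_utils also provides. Here is a minimal sketch of those two helpers, assuming the standard definitions:

def sigmoid(x):
    """Compute the sigmoid of x, element-wise."""
    s = 1 / (1 + np.exp(-x))
    return s


def relu(x):
    """Compute the ReLU of x (element-wise max with 0)."""
    s = np.maximum(0, x)
    return s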


def forward_propagation(X, parameters):
    """
    Implements the forward propagation (and computes the loss) presented in Figure 2.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape ()
                    b1 -- bias vector of shape ()
                    W2 -- weight matrix of shape ()
                    b2 -- bias vector of shape ()
                    W3 -- weight matrix of shape ()
                    b3 -- bias vector of shape ()

    Returns:
    a3 -- output of the last activation (the model's prediction)
    cache -- tuple of intermediate values needed for backward propagation
    """

    # retrieve parameters
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    z1 = np.dot(W1, X) + b1
    a1 = relu(z1)
    z2 = np.dot(W2, a1) + b2
    a2 = relu(z2)
    z3 = np.dot(W3, a2) + b3
    a3 = sigmoid(z3)

    cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)

    return a3, cache


def backward_propagation(X, Y, cache):
    """
    Implement the backward propagation presented in figure 2.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot)
    cache -- cache output from forward_propagation()

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
    """
    m = X.shape[1]
    (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache

    dz3 = 1./m * (a3 - Y)
    dW3 = np.dot(dz3, a2.T)
    db3 = np.sum(dz3, axis=1, keepdims = True)

    da2 = np.dot(W3.T, dz3)
    dz2 = np.multiply(da2, np.int64(a2 > 0))
    dW2 = np.dot(dz2, a1.T)
    db2 = np.sum(dz2, axis=1, keepdims = True)

    da1 = np.dot(W2.T, dz2)
    dz1 = np.multiply(da1, np.int64(a1 > 0))
    dW1 = np.dot(dz1, X.T)
    db1 = np.sum(dz1, axis=1, keepdims = True)

    gradients = {"dz3": dz3, "dW3": dW3, "db3": db3,
                 "da2": da2, "dz2": dz2, "dW2": dW2, "db2": db2,
                 "da1": da1, "dz1": dz1, "dW1": dW1, "db1": db1}

    return gradients


def compute_cost(a3, Y):

    """
    Implement the cost function

    Arguments:
    a3 -- post-activation, output of forward propagation
    Y -- "true" labels vector, same shape as a3

    Returns:
    cost - value of the cost function
    """
    m = Y.shape[1]

    logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
    cost = 1./m * np.sum(logprobs)

    return cost


def predict(X, y, parameters):
    """
    This function is used to predict the results of an n-layer neural network.

    Arguments:
    X -- data set of examples you would like to label
    y -- true labels, used only to report the accuracy
    parameters -- parameters of the trained model

    Returns:
    p -- predictions for the given dataset X
    """

    m = X.shape[1]
    p = np.zeros((1,m), dtype=int)

    # Forward propagation
    a3, caches = forward_propagation(X, parameters)

    # convert probas to 0/1 predictions
    for i in range(0, a3.shape[1]):
        if a3[0,i] > 0.5:
            p[0,i] = 1
        else:
            p[0,i] = 0

    # print results

    #print ("predictions: " + str(p[0,:]))
    #print ("true labels: " + str(y[0,:]))
    print("Accuracy: "  + str(np.mean((p[0,:] == y[0,:]))))

    return p


def predict_dec(parameters, X):
    """
    Used for plotting decision boundary.

    Arguments:
    parameters -- python dictionary containing your parameters 
    X -- input data of size (m, K)

    Returns
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """

    # Predict using forward propagation and a classification threshold of 0.5
    a3, cache = forward_propagation(X, parameters)
    predictions = (a3 > 0.5)
    return predictions


def plot_decision_boundary(model, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y.ravel(), cmap=plt.cm.Spectral)
    plt.show()


def load_dataset():
    np.random.seed(3)
    train_X, train_Y = sklearn.datasets.make_moons(n_samples=300, noise=.2) #300 #0.2 
    # Visualize the data
    plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);
    train_X = train_X.T
    train_Y = train_Y.reshape((1, train_Y.shape[0]))

    return train_X, train_Y

1. Gradient Descent

The simplest optimization algorithm is gradient descent (GD). With m training examples, every iteration runs forward propagation, backward propagation, and the parameter update over all m examples, which is why it is also called Batch Gradient Descent.

Warm-up exercise

Implement the gradient descent update rule. For $l = 1, \dots, L$:

$$W^{[l]} = W^{[l]} - \alpha \, dW^{[l]}$$
$$b^{[l]} = b^{[l]} - \alpha \, db^{[l]}$$
# GRADED FUNCTION: update_parameters_with_gd

def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Update parameters using one step of gradient descent

    Arguments:
    parameters -- python dictionary containing your parameters to be updated:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients to update each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- python dictionary containing your updated parameters 
    """

    L = len(parameters) // 2 # number of layers in the neural networks

    # Update rule for each parameter
    for l in range(L):
        ### START CODE HERE ### (approx. 2 lines)
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate*grads["dW" + str(l+1)]
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate*grads["db" + str(l+1)]
        ### END CODE HERE ###

    return parameters




parameters, grads, learning_rate = update_parameters_with_gd_test_case()

parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))

# W1 = [[ 1.63535156 -0.62320365 -0.53718766]
#  [-1.07799357  0.85639907 -2.29470142]]
# b1 = [[ 1.74604067]
#  [-0.75184921]]
# W2 = [[ 0.32171798 -0.25467393  1.46902454]
#  [-2.05617317 -0.31554548 -0.3756023 ]
#  [ 1.1404819  -1.09976462 -0.1612551 ]]
# b2 = [[-0.88020257]
#  [ 0.02561572]
#  [ 0.57539477]]

A variant is Stochastic Gradient Descent (SGD): with m examples, each update uses a single example, so one pass over the data performs m updates. SGD is the special case of mini-batch gradient descent with a mini-batch size of 1.

(Batch) Gradient Descent:

X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
    # Forward propagation
    a, caches = forward_propagation(X, parameters)
    # Compute cost.
    cost = compute_cost(a, Y)
    # Backward propagation.
    grads = backward_propagation(a, caches, parameters)
    # Update parameters.
    parameters = update_parameters(parameters, grads)

Stochastic Gradient Descent:

X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
    for j in range(0, m):
        # Forward propagation
        a, caches = forward_propagation(X[:,j], parameters)
        # Compute cost
        cost = compute_cost(a, Y[:,j])
        # Backward propagation
        grads = backward_propagation(a, caches, parameters)
        # Update parameters.
        parameters = update_parameters(parameters, grads)
  • Batch gradient descent: small oscillations and a fast, steady decrease in cost, but each update is slow to compute because it uses every example.
  • Stochastic gradient descent: large oscillations and a slower, noisier decrease, but each update is cheap to compute.

[Figure: cost curves of batch gradient descent vs. stochastic gradient descent]

In practice, a middle ground works best: mini-batch gradient descent. Split the training set into mini-batches and make one update per mini-batch, so the cost decreases quickly while each update stays cheap; a pseudocode sketch in the same style as the two loops above follows.

[Figure: cost curve of mini-batch gradient descent vs. stochastic gradient descent]
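(Mini-batch) Gradient Descent, as a sketch in the same pseudocode style as above (update_parameters is the same placeholder; random_mini_batches is implemented in the next section):

X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
    # Reshuffle and repartition the training set on every pass
    minibatches = random_mini_batches(X, Y, mini_batch_size)
    for (minibatch_X, minibatch_Y) in minibatches:
        # Forward propagation on one mini-batch
        a, caches = forward_propagation(minibatch_X, parameters)
        # Compute cost on this mini-batch
        cost = compute_cost(a, minibatch_Y)
        # Backward propagation
        grads = backward_propagation(a, caches, parameters)
        # Update parameters
        parameters = update_parameters(parameters, grads)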

2 Mini-Batch Gradient Descent

Build mini-batches from the training set (X, Y).

Two steps: shuffle + partition.

Shuffle

Each column is one training example. Randomly permute the columns of X (and of Y, with the same permutation) so that examples end up randomly distributed across the mini-batches.

[Figure: shuffling the columns of X and Y in sync]

Partition

Split the shuffled dataset into mini-batches of size mini_batch_size (64 here). Since m may not be divisible by the mini-batch size, the last mini-batch may contain fewer examples.

[Figure: partitioning the shuffled (X, Y) into mini-batches of size 64]

Exercise: implement random_mini_batches

  • The shuffle step is already written for you; complete the partition step.
  • If m is divisible by mini_batch_size, every mini-batch is a full one.
  • Otherwise there are floor(m / mini_batch_size) full mini-batches, plus one final mini-batch holding the remaining m - floor(m / mini_batch_size) * mini_batch_size examples (a worked example follows this list).
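For example, the test case below has m = 148 examples with mini_batch_size = 64: floor(148 / 64) = 2 full mini-batches of 64 columns each, plus a final mini-batch of 148 - 2 * 64 = 20 columns, which matches the shapes printed below.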
# GRADED FUNCTION: random_mini_batches

def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """

    np.random.seed(seed)            # To make your "random" minibatches the same as ours
    m = X.shape[1]                  # number of training examples
    mini_batches = []

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1,m))

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
    for k in range(0, num_complete_minibatches):
        ### START CODE HERE ### (approx. 2 lines)
        mini_batch_X = shuffled_X[:, k*mini_batch_size : (k+1)*mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k*mini_batch_size : (k+1)*mini_batch_size]
        ### END CODE HERE ###
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        ### START CODE HERE ### (approx. 2 lines)
        mini_batch_X = shuffled_X[:, num_complete_minibatches*mini_batch_size : m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches*mini_batch_size : m]
        ### END CODE HERE ###
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches




X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)

print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape)) 
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))

# shape of the 1st mini_batch_X: (12288, 64)
# shape of the 2nd mini_batch_X: (12288, 64)
# shape of the 3rd mini_batch_X: (12288, 20)
# shape of the 1st mini_batch_Y: (1, 64)
# shape of the 2nd mini_batch_Y: (1, 64)
# shape of the 3rd mini_batch_Y: (1, 20)
# mini batch sanity check: [ 0.90085595 -0.7612069   0.2344157 ]

Notes:

  • Building mini-batches has two stages: shuffle and partition.
  • The mini-batch size is usually a power of 2, e.g. 16, 32, 64, 128 (a usage sketch follows this list).
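As a quick usage sketch, assuming the load_dataset helper above: the 300-example "moons" set splits into four full mini-batches of 64 plus one of 44.

# Minimal usage sketch: build mini-batches from the "moons" dataset.
train_X, train_Y = load_dataset()                  # train_X: (2, 300), train_Y: (1, 300)
minibatches = random_mini_batches(train_X, train_Y, mini_batch_size=64, seed=0)
print(len(minibatches))                            # 5: four mini-batches of 64 and one of 44
print(minibatches[-1][0].shape, minibatches[-1][1].shape)   # (2, 44) (1, 44)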

3 Momentum

Because mini-batch gradient descent updates the parameters after seeing only a subset of the examples, the direction of descent oscillates. Momentum reduces these oscillations.

Overview

When computing the update direction v, momentum does not rely only on the current gradient ("acceleration"); it combines the accumulated velocity from previous steps with the current gradient, so each step can be viewed as an exponentially weighted average of the recent gradients.

$$v = \beta \, v + (1 - \beta) \, dW_{\text{current}}$$

[Figure: momentum averages past gradients to smooth the oscillations of mini-batch gradient descent]

Exercise: initialize the velocity v

for l=0,…,L-1

v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# GRADED FUNCTION: initialize_velocity

def initialize_velocity(parameters):
    """
    Initializes the velocity as a python dictionary with:
                - keys: "dW1", "db1", ..., "dWL", "dbL" 
                - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl

    Returns:
    v -- python dictionary containing the current velocity.
                    v['dW' + str(l)] = velocity of dWl
                    v['db' + str(l)] = velocity of dbl
    """

    L = len(parameters) // 2 # number of layers in the neural networks
    v = {}

    # Initialize velocity
    for l in range(L):  # range runs from 0 to L-1
        ### START CODE HERE ### (approx. 2 lines)
        v["dW" + str(l+1)] = np.zeros(parameters["W" + str(l+1)].shape)
        v["db" + str(l+1)] = np.zeros(parameters["b" + str(l+1)].shape)
        ### END CODE HERE ###

    return v

##############################################################

parameters = initialize_velocity_test_case()

v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))

# v["dW1"] = [[ 0.  0.  0.]
#  [ 0.  0.  0.]]
# v["db1"] = [[ 0.]
#  [ 0.]]
# v["dW2"] = [[ 0.  0.  0.]
#  [ 0.  0.  0.]
#  [ 0.  0.  0.]]
# v["db2"] = [[ 0.]
#  [ 0.]
#  [ 0.]]

Exercise: implement the momentum parameter update

Update rule: for l = 1, ..., L

$$\begin{cases} v_{dW^{[l]}} = \beta\, v_{dW^{[l]}} + (1-\beta)\, dW^{[l]} \\ W^{[l]} = W^{[l]} - \alpha\, v_{dW^{[l]}} \end{cases}$$

$$\begin{cases} v_{db^{[l]}} = \beta\, v_{db^{[l]}} + (1-\beta)\, db^{[l]} \\ b^{[l]} = b^{[l]} - \alpha\, v_{db^{[l]}} \end{cases}$$
  • l: the layer index
  • β: the momentum hyperparameter
  • α: the learning rate

Note that range includes its start but not its end, so the loop variable runs from 0 to L-1 while the parameter indices run from 1 to L; hence the +1 in the code below.

# GRADED FUNCTION: update_parameters_with_momentum

def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    Update parameters using Momentum

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- python dictionary containing the current velocity:
                    v['dW' + str(l)] = ...
                    v['db' + str(l)] = ...
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- python dictionary containing your updated parameters 
    v -- python dictionary containing your updated velocities
    """

    L = len(parameters) // 2 # number of layers in the neural networks

    # Momentum update for each parameter
    for l in range(L):

        ### START CODE HERE ### (approx. 4 lines)
        # compute velocities
        v["dW" + str(l+1)] = beta*v["dW" + str(l+1)] + (1-beta)*grads["dW" + str(l+1)]
        v["db" + str(l+1)] = beta*v["db" + str(l+1)] + (1-beta)*grads["db" + str(l+1)]
        # update parameters
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate*v["dW" + str(l+1)]
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate*v["db" + str(l+1)]
        ### END CODE HERE ###

    return parameters, v

##############################################################

parameters, grads, v = update_parameters_with_momentum_test_case()

parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))

# W1 = [[ 1.62544598 -0.61290114 -0.52907334]
#  [-1.07347112  0.86450677 -2.30085497]]
# b1 = [[ 1.74493465]
#  [-0.76027113]]
# W2 = [[ 0.31930698 -0.24990073  1.4627996 ]
#  [-2.05974396 -0.32173003 -0.38320915]
#  [ 1.13444069 -1.0998786  -0.1713109 ]]
# b2 = [[-0.87809283]
#  [ 0.04055394]
#  [ 0.58207317]]
# v["dW1"] = [[-0.11006192  0.11447237  0.09015907]
#  [ 0.05024943  0.09008559 -0.06837279]]
# v["db1"] = [[-0.01228902]
#  [-0.09357694]]
# v["dW2"] = [[-0.02678881  0.05303555 -0.06916608]
#  [-0.03967535 -0.06871727 -0.08452056]
#  [-0.06712461 -0.00126646 -0.11173103]]
# v["db2"] = [[ 0.02344157]
#  [ 0.16598022]
#  [ 0.07420442]]

Notes:

  • The velocity is initialized to zero, so it takes a few iterations before it builds up to a meaningful value.
  • If β = 0, the update reduces to standard gradient descent (see the sanity-check sketch below).
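A minimal sanity-check sketch of the second point, reusing the test helpers above: with beta = 0 the velocity is simply overwritten by the current gradient, so one momentum step equals one plain gradient-descent step.

import copy

# With beta = 0: v = 0*v + 1*grads = grads, so the update equals plain GD.
parameters, grads, v = update_parameters_with_momentum_test_case()
params_gd = update_parameters_with_gd(copy.deepcopy(parameters), grads, learning_rate=0.01)
params_mom, _ = update_parameters_with_momentum(copy.deepcopy(parameters), grads, v,
                                                beta=0.0, learning_rate=0.01)
for key in params_gd:
    assert np.allclose(params_gd[key], params_mom[key])
print("beta = 0 reproduces plain gradient descent")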

How to choose β

  • If β is too large, the current gradient ("acceleration") barely matters and the trajectory becomes too sluggish.
  • If β is too small, the update is close to standard gradient descent and does little to damp the oscillations.
  • β is usually chosen in [0.8, 0.999]; β = 0.9 is a reasonable default.
  • You can also try a few different values of β and see which one reduces the cost J the most.

Key points

  • Momentum uses the gradients of the past few steps to smooth out gradient descent. It can be applied to
    • batch gradient descent
    • mini-batch gradient descent
    • stochastic gradient descent
  • Two hyperparameters need to be tuned:
    • α: the learning rate
    • β: the momentum

4 Adam

Adam is one of the most effective optimization algorithms for training neural networks; it combines RMSProp (see the lecture slides) with momentum.

Steps

  1. Compute the momentum moving average v and its bias-corrected version v^corrected.
  2. Compute the RMSProp moving average s and its bias-corrected version s^corrected.
  3. Update the parameters.

for l = 1…L:

$$\begin{cases}
v_{dW^{[l]}} = \beta_1\, v_{dW^{[l]}} + (1-\beta_1)\, \dfrac{\partial J}{\partial W^{[l]}} \\[4pt]
v^{corrected}_{dW^{[l]}} = \dfrac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\[4pt]
s_{dW^{[l]}} = \beta_2\, s_{dW^{[l]}} + (1-\beta_2)\, \left(\dfrac{\partial J}{\partial W^{[l]}}\right)^2 \\[4pt]
s^{corrected}_{dW^{[l]}} = \dfrac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\[4pt]
W^{[l]} = W^{[l]} - \alpha\, \dfrac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
\end{cases}$$

(The analogous update, using $db^{[l]}$, is applied to $b^{[l]}$.)

where:

  • t: the number of Adam steps taken so far
  • L: the number of layers
  • β1 and β2: hyperparameters controlling the two exponentially weighted averages
  • α: the learning rate
  • ε: a very small number that prevents division by zero (a short numeric illustration of the bias correction follows this list)
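To see why the bias correction matters: at step t = 1 with β1 = 0.9 and v initialized to zero, v = 0.9 · 0 + 0.1 · dW = 0.1 · dW, which badly underestimates the gradient; dividing by 1 - β1^t = 0.1 restores the full magnitude. As t grows, 1 - β1^t approaches 1 and the correction fades out.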

Initialization

# GRADED FUNCTION: initialize_adam

def initialize_adam(parameters) :
    """
    Initializes v and s as two python dictionaries with:
                - keys: "dW1", "db1", ..., "dWL", "dbL" 
                - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters["W" + str(l)] = Wl
                    parameters["b" + str(l)] = bl

    Returns: 
    v -- python dictionary that will contain the exponentially weighted average of the gradient.
                    v["dW" + str(l)] = ...
                    v["db" + str(l)] = ...
    s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
                    s["dW" + str(l)] = ...
                    s["db" + str(l)] = ...

    """

    L = len(parameters) // 2 # number of layers in the neural networks
    v = {}
    s = {}

    # Initialize v, s. Input: "parameters". Outputs: "v, s".
    for l in range(L):
    ### START CODE HERE ### (approx. 4 lines)
        v["dW" + str(l+1)] = np.zeros(parameters["W" + str(l+1)].shape)
        v["db" + str(l+1)] = np.zeros(parameters["b" + str(l+1)].shape)
        s["dW" + str(l+1)] = np.zeros(parameters["W" + str(l+1)].shape)
        s["db" + str(l+1)] = np.zeros(parameters["b" + str(l+1)].shape)
    ### END CODE HERE ###

    return v, s

##############################################################

parameters = initialize_adam_test_case()

v, s = initialize_adam(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))


# v["dW1"] = [[ 0.  0.  0.]
#  [ 0.  0.  0.]]
# v["db1"] = [[ 0.]
#  [ 0.]]
# v["dW2"] = [[ 0.  0.  0.]
#  [ 0.  0.  0.]
#  [ 0.  0.  0.]]
# v["db2"] = [[ 0.]
#  [ 0.]
#  [ 0.]]
# s["dW1"] = [[ 0.  0.  0.]
#  [ 0.  0.  0.]]
# s["db1"] = [[ 0.]
#  [ 0.]]
# s["dW2"] = [[ 0.  0.  0.]
#  [ 0.  0.  0.]
#  [ 0.  0.  0.]]
# s["db2"] = [[ 0.]
#  [ 0.]
#  [ 0.]]

Parameter update

# GRADED FUNCTION: update_parameters_with_adam

def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999,  epsilon = 1e-8):
    """
    Update parameters using Adam

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates 
    beta2 -- Exponential decay hyperparameter for the second moment estimates 
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters 
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """

    L = len(parameters) // 2                 # number of layers in the neural networks
    v_corrected = {}                         # Initializing first moment estimate, python dictionary
    s_corrected = {}                         # Initializing second moment estimate, python dictionary

    # Perform Adam update on all parameters
    for l in range(L):
        # Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
        ### START CODE HERE ### (approx. 2 lines)
        v["dW" + str(l+1)] = beta1 * v["dW" + str(l+1)] + (1 - beta1) * grads['dW' + str(l+1)]
        v["db" + str(l+1)] = beta1 * v["db" + str(l+1)] + (1 - beta1) * grads['db' + str(l+1)]
        ### END CODE HERE ###

        # Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
        ### START CODE HERE ### (approx. 2 lines)
        v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)] / (1 - beta1 ** t)
        v_corrected["db" + str(l+1)] = v["db" + str(l+1)] / (1 - beta1 ** t)
        ### END CODE HERE ###

        # Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
        ### START CODE HERE ### (approx. 2 lines)
        s["dW" + str(l+1)] = s["dW" + str(l+1)] + (1 - beta2) * (grads['dW' + str(l+1)] ** 2)
        s["db" + str(l+1)] = s["db" + str(l+1)] + (1 - beta2) * (grads['db' + str(l+1)] ** 2)
        ### END CODE HERE ###

        # Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
        ### START CODE HERE ### (approx. 2 lines)
        s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / (1 - beta2 ** t)
        s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / (1 - beta2 ** t)
        ### END CODE HERE ###

        # Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
        ### START CODE HERE ### (approx. 2 lines)
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * ( v_corrected["dW" + str(l+1)] / (np.sqrt(s_corrected["dW" + str(l+1)]) + epsilon))
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * ( v_corrected["db" + str(l+1)] / (np.sqrt(s_corrected["db" + str(l+1)]) + epsilon))
        ### END CODE HERE ###

    return parameters, v, s

##############################################################

parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s  = update_parameters_with_adam(parameters, grads, v, s, t = 2)

print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))


# W1 = [[ 1.63178673 -0.61919778 -0.53561312]
#  [-1.08040999  0.85796626 -2.29409733]]
# b1 = [[ 1.75225313]
#  [-0.75376553]]
# W2 = [[ 0.32648046 -0.25681174  1.46954931]
#  [-2.05269934 -0.31497584 -0.37661299]
#  [ 1.14121081 -1.09244991 -0.16498684]]
# b2 = [[-0.88529979]
#  [ 0.03477238]
#  [ 0.57537385]]
# v["dW1"] = [[-0.11006192  0.11447237  0.09015907]
#  [ 0.05024943  0.09008559 -0.06837279]]
# v["db1"] = [[-0.01228902]
#  [-0.09357694]]
# v["dW2"] = [[-0.02678881  0.05303555 -0.06916608]
#  [-0.03967535 -0.06871727 -0.08452056]
#  [-0.06712461 -0.00126646 -0.11173103]]
# v["db2"] = [[ 0.02344157]
#  [ 0.16598022]
#  [ 0.07420442]]
# s["dW1"] = [[ 0.00121136  0.00131039  0.00081287]
#  [ 0.0002525   0.00081154  0.00046748]]
# s["db1"] = [[  1.51020075e-05]
#  [  8.75664434e-04]]
# s["dW2"] = [[  7.17640232e-05   2.81276921e-04   4.78394595e-04]
#  [  1.57413361e-04   4.72206320e-04   7.14372576e-04]
#  [  4.50571368e-04   1.60392066e-07   1.24838242e-03]]
# s["db2"] = [[  5.49507194e-05]
#  [  2.75494327e-03]
#  [  5.50629536e-04]]

You now have three optimization methods (mini-batch gradient descent, Momentum, and Adam).
Let's compare how well each of them optimizes the model.

5 A model with different optimization algorithms

We test on the "moons" dataset.

train_X, train_Y = load_dataset()

[Figure: scatter plot of the "moons" training set]

A 3-layer neural network has already been implemented; we now train it with each of the following optimizers:

  • Mini-batch Gradient Descent
    • update_parameters_with_gd()
  • Mini-batch Momentum:
    • initialize_velocity()
    • update_parameters_with_momentum()
  • Mini-batch Adam:
    • initialize_adam()
    • update_parameters_with_adam()
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999,  epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates 
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates 
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters 
    """

    L = len(layers_dims)             # number of layers in the neural networks
    costs = []                       # to keep track of the cost
    t = 0                            # initializing the counter required for Adam update
    seed = 10                        # For grading purposes, so that your "random" minibatches are the same as ours

    # Initialize parameters
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer
    if optimizer == "gd":
        pass # no initialization required for gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop
    for i in range(num_epochs):

        # Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)

        for minibatch in minibatches:

            # Select a minibatch
            (minibatch_X, minibatch_Y) = minibatch

            # Forward propagation
            a3, caches = forward_propagation(minibatch_X, parameters)

            # Compute cost
            cost = compute_cost(a3, minibatch_Y)

            # Backward propagation
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)

            # Update parameters
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1 # Adam counter
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2,  epsilon)

        # Print the cost every 1000 epoch
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters

Now let's try them out.

5.1 Mini-batch Gradient descent

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)

# Cost after epoch 0: 0.690736
# Cost after epoch 1000: 0.685273
# Cost after epoch 2000: 0.647072
# Cost after epoch 3000: 0.619525
# Cost after epoch 4000: 0.576584
# Cost after epoch 5000: 0.607243
# Cost after epoch 6000: 0.529403
# Cost after epoch 7000: 0.460768
# Cost after epoch 8000: 0.465586
# Cost after epoch 9000: 0.464518
#
# Accuracy: 0.796666666667

[Figure: cost per epoch for mini-batch gradient descent]
[Figure: decision boundary of the model with Gradient Descent optimization]

5.2 Mini-batch Momentum

Because the dataset is small, the gains from using momentum are modest.

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)

# Cost after epoch 0: 0.690741
# Cost after epoch 1000: 0.685341
# Cost after epoch 2000: 0.647145
# Cost after epoch 3000: 0.619594
# Cost after epoch 4000: 0.576665
# Cost after epoch 5000: 0.607324
# Cost after epoch 6000: 0.529476
# Cost after epoch 7000: 0.460936
# Cost after epoch 8000: 0.465780
# Cost after epoch 9000: 0.464740
# 
# Accuracy: 0.796666666667

[Figure: cost per epoch for mini-batch gradient descent with momentum]
[Figure: decision boundary of the model with Momentum optimization]

5.3 Mini-batch Adam

# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")

# Predict
predictions = predict(train_X, train_Y, parameters)

# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)

# Cost after epoch 0: 0.690552
# Cost after epoch 1000: 0.233787
# Cost after epoch 2000: 0.179942
# Cost after epoch 3000: 0.099978
# Cost after epoch 4000: 0.142203
# Cost after epoch 5000: 0.114152
# Cost after epoch 6000: 0.128446
# Cost after epoch 7000: 0.042047
# Cost after epoch 8000: 0.132215
# Cost after epoch 9000: 0.214512
# 
# Accuracy: 0.936666666667

[Figure: cost per epoch for mini-batch Adam]
[Figure: decision boundary of the model with Adam optimization]

5.4 Summary

Optimization method            Accuracy   Cost curve
Mini-batch Gradient Descent    79.7%      oscillating
Mini-batch Momentum            79.7%      oscillating
Mini-batch Adam                94%        oscillating
  • Momentum usually helps, but on a dataset this small and simple its benefit is limited.
  • Part of the violent oscillation in the cost comes from some mini-batches being harder for the optimizer than others.
  • Adam clearly outperforms the other two methods. If you run more epochs (one epoch = one full pass over the training set), all three eventually perform well, but Adam converges much faster (a minimal sketch for re-running the comparison follows this list).
  • Advantages of Adam:
    • relatively low memory requirements (though somewhat higher than the other two methods)
    • it usually works well even with little hyperparameter tuning.
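A minimal sketch, assuming train_X, train_Y and the functions defined above, that reproduces the comparison in a single loop:

# Train the same 3-layer network with each optimizer and report training accuracy.
layers_dims = [train_X.shape[0], 5, 2, 1]
for optimizer in ["gd", "momentum", "adam"]:
    print("--- optimizer: " + optimizer + " ---")
    params = model(train_X, train_Y, layers_dims, optimizer=optimizer, print_cost=False)
    predict(train_X, train_Y, params)   # prints "Accuracy: ..."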

Further reading

The Adam paper: https://arxiv.org/pdf/1412.6980.pdf
