Coursera-MachineLearning-NeuralNetwork(2)

Coursera Machine Learning - Andrew Ng

The neural network in the previous post only performed forward propagation; this time we add backpropagation.
Background: the task is again recognizing handwritten digits.
[Figure: sample handwritten digit images from the dataset]

% 1. Load and visualize the data

%% Initialization
clear ; close all; clc

%% Set up parameters
input_layer_size  = 400;  % 20x20 Input Images of Digits
hidden_layer_size = 25;   % 25 hidden units
num_labels = 10;          % 10 labels, from 1 to 10   
                          % (note that we have mapped "0" to label 10)
                         
% Load the training set
load('ex4data1.mat');
m = size(X, 1);

% Randomly select 100 examples to display
sel = randperm(size(X, 1));
sel = sel(1:100);
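
The course script then displays these selected examples; a minimal sketch, assuming the course-provided displayData helper from the exercise files is on the path:

% Display the 100 randomly selected digits (displayData ships with the
% exercise files and is not reproduced in this post)
displayData(X(sel, :));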
% 2. Load the pre-trained neural network parameters

load('ex4weights.mat');

% Unroll the parameters into a single column vector
nn_params = [Theta1(:) ; Theta2(:)];
% 3. Compute the cost function (neural network feedforward pass)
% It is recommended to compute the cost without regularization the
% first time, i.e., set lambda to 0.

lambda = 0;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
                   num_labels, X, y, lambda);
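% For reference, the course's ex4 script reports a cost of about
% 0.287629 at this point (feedforward with pre-trained weights, lambda = 0).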

% Function nnCostFunction:
function [J, grad] = nnCostFunction(nn_params, ...
                                   input_layer_size, ...
                                   hidden_layer_size, ...
                                   num_labels, ...
                                   X, y, lambda)

% Reshape nn_params back into the weight matrices Theta1 and Theta2
% Theta1 has size 25 x 401  
% Theta2 has size 10 x 26  
Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));
Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));

m = size(X, 1); 
J = 0;
Theta1_grad = zeros(size(Theta1));
Theta2_grad = zeros(size(Theta2));


% Part 1: feedforward propagation and cost computation
% (the dimensions of X and y are listed after this function)

% 5000x10: these two statements convert y into a one-hot (0/1) encoding
h = eye(num_labels);
y = h(y, :);
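% Worked example (hypothetical 3-class case): with num_labels = 3 and
% y = [2; 1], h = eye(3) and h(y, :) gives [0 1 0; 1 0 0] -- one row
% per example, with a 1 in the column of its label.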

% Add a bias column of ones to a1: 5000x401 (m = 5000)
a1 = [ones(m, 1) X];

% Compute layer by layer: z2, a2, ...
z2 = a1 * Theta1';
a2 = sigmoid(z2);

% a2 also needs a bias column of ones; then compute z3 and a3
n = size(a2, 1);
a2 = [ones(n, 1) a2];
z3 = a2 * Theta2';
a3 = sigmoid(z3);

% Compute the cost J (the formula is reproduced after this function):
J = sum( sum( -y .* log(a3) - (1-y) .* log(1-a3) )) / m;

% Add the regularization term
% Note the "Theta1(:, 2:end)": the bias column that was added must be
% excluded from regularization
regularized = lambda / (2 * m) * (sum(sum(Theta1(:, 2:end) .^ 2)) + ...
              sum(sum(Theta2(:, 2:end) .^ 2)));
J = J + regularized;


% Part 2: backpropagation, computing the gradients
% It is recommended to implement backpropagation with a for loop first,
% before moving to a vectorized version.

% delta3 is the difference between a3 and y; delta2 is then obtained
% from delta3 * Theta2 (the formulas are reproduced after this function):
delta3 = a3 - y;
delta2 = delta3 * Theta2;

% Drop the bias column: delta2 becomes 5000x25
delta2 = delta2(:, 2:end);
delta2 = delta2 .* sigmoidGradient(z2);

% Accumulate the gradients (vectorized over all m examples at once)
Delta_1 = zeros(size(Theta1));
Delta_2 = zeros(size(Theta2));

Delta_1 = Delta_1 + delta2' * a1;
Delta_2 = Delta_2 + delta3' * a2;

% Gradient = (1/m) * Delta plus the regularization term (lambda/m) * Theta
Theta1_grad = ((1 / m) * Delta_1) + ((lambda / m) * Theta1);
Theta2_grad = ((1 / m) * Delta_2) + ((lambda / m) * Theta2);

% The first column (bias) of Theta1_grad and Theta2_grad must not be
% regularized, so subtract that term back out
Theta1_grad(:, 1) = Theta1_grad(:, 1) - ((lambda / m) * (Theta1(:, 1)));
Theta2_grad(:, 1) = Theta2_grad(:, 1) - ((lambda / m) * (Theta2(:, 1)));

% Unroll gradients
grad = [Theta1_grad(:) ; Theta2_grad(:)];
end

Dimensions: X is 5000x400 (5000 training images of 20x20 pixels) and y is 5000x1, with labels 1-10.

The regularized cost function implemented above:

$$J(\Theta) = -\frac{1}{m}\sum_{i=1}^{m}\sum_{k=1}^{K}\Big[y_k^{(i)}\log\big(h_\Theta(x^{(i)})\big)_k + \big(1-y_k^{(i)}\big)\log\big(1-h_\Theta(x^{(i)})\big)_k\Big] + \frac{\lambda}{2m}\Big[\sum_{j,k}\big(\Theta^{(1)}_{j,k}\big)^2 + \sum_{j,k}\big(\Theta^{(2)}_{j,k}\big)^2\Big]$$

where the regularization sums skip the bias columns. The backpropagation error terms are $\delta^{(3)} = a^{(3)} - y$ and $\delta^{(2)} = \big(\delta^{(3)}\,\Theta^{(2)}\big)_{2:\text{end}} \odot g'(z^{(2)})$.

% 4. Compute the cost function, now with regularization:

lambda = 1;
J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
                   num_labels, X, y, lambda);
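% For reference, the course's ex4 script reports a cost of about
% 0.383770 here (lambda = 1).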
% 5. Compute the gradient of the sigmoid function (Sigmoid Gradient)

g = sigmoidGradient([-1 -0.5 0 0.5 1]);
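% Expected output (up to rounding): [0.196612 0.235004 0.25 0.235004
% 0.196612]; g'(0) = 0.25 is the maximum, and g' is symmetric about 0.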

% Function sigmoidGradient:
function g = sigmoidGradient(z)
g = zeros(size(z));
% g'(z) = g(z) * (1 - g(z)), where g is the sigmoid function:
g = sigmoid(z) .* (1 - sigmoid(z));
end

$g'(z) = g(z)\,\big(1 - g(z)\big), \qquad g(z) = \dfrac{1}{1 + e^{-z}}$
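
The code throughout this post also relies on the exercise's sigmoid helper, which the post never shows; a minimal sketch of the standard definition used in the course files:

% Function sigmoid (standard logistic function):
function g = sigmoid(z)
g = 1.0 ./ (1.0 + exp(-z));
end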

% 6. Randomly initialize the parameters

initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);
initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels);

% Unroll the parameters
initial_nn_params = [initial_Theta1(:) ; initial_Theta2(:)];

% Function randInitializeWeights:
function W = randInitializeWeights(L_in, L_out)
W = zeros(L_out, 1 + L_in);
epsilon_init = 0.12;
W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;
end
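
A common heuristic from the course notes is to pick $\epsilon_{\text{init}} = \sqrt{6}/\sqrt{L_{\text{in}} + L_{\text{out}}}$; for the 400-unit input layer and 25-unit hidden layer this gives roughly 0.12, which matches the hard-coded value above.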
% 7. Gradient checking (check gradients)
% (I am not entirely clear on this part.)

function checkNNGradients(lambda)

% If lambda was not supplied, default to 0 (no regularization on the first run)
if ~exist('lambda', 'var') || isempty(lambda)
    lambda = 0;
end

% Set up a small test network: 3 inputs, 5 hidden units, 3 labels (the
% classes to predict), and 5 examples
input_layer_size = 3;
hidden_layer_size = 5;
num_labels = 3;
m = 5;

% Generate some reproducible pseudo-random weights
Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size);
Theta2 = debugInitializeWeights(num_labels, hidden_layer_size);

% Function debugInitializeWeights:
function W = debugInitializeWeights(fan_out, fan_in)
W = zeros(fan_out, 1 + fan_in);
% sin() yields deterministic values, so each run is reproducible
W = reshape(sin(1:numel(W)), size(W)) / 10;
end

% Reuse the same function to generate the training set X
X  = debugInitializeWeights(m, input_layer_size - 1);
% Labels cycle through 1..num_labels: here y = [2; 3; 1; 2; 3]
y  = 1 + mod(1:m, num_labels)';

% Unroll the parameters
nn_params = [Theta1(:) ; Theta2(:)];

% Short hand for cost function
costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ...
                               num_labels, X, y, lambda);

[cost, grad] = costFunc(nn_params);
numgrad = computeNumericalGradient(costFunc, nn_params);

% Function computeNumericalGradient:
function numgrad = computeNumericalGradient(J, theta)
numgrad = zeros(size(theta));
perturb = zeros(size(theta));
e = 1e-4;
for p = 1:numel(theta)
    % Set perturbation vector
    perturb(p) = e;
    loss1 = J(theta - perturb);
    loss2 = J(theta + perturb);
    % Compute Numerical Gradient
    numgrad(p) = (loss2 - loss1) / (2*e);
    perturb(p) = 0;
end
end
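
The loop above computes the centered finite-difference approximation $\frac{\partial J}{\partial \theta_p} \approx \frac{J(\theta + e\,u_p) - J(\theta - e\,u_p)}{2e}$ with $e = 10^{-4}$, where $u_p$ is the unit vector for parameter $p$; this estimate is accurate to $O(e^2)$.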


% Compare the two gradients side by side; the columns should be very close
disp([numgrad grad]);
% Relative difference; the course states this should be below 1e-9 when
% backpropagation is implemented correctly
diff = norm(numgrad - grad) / norm(numgrad + grad);
end
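
A typical invocation, mirroring how the course's ex4 script calls it (first without regularization, then with):

checkNNGradients;        % lambda defaults to 0
checkNNGradients(3);     % repeat with regularization, lambda = 3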
% 8. Train the neural network

options = optimset('MaxIter', 50);
lambda = 1;

% Create "short hand" for the cost function to be minimized
costFunction = @(p) nnCostFunction(p, ...
                                   input_layer_size, ...
                                   hidden_layer_size, ...
                                   num_labels, X, y, lambda);

[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);
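
fmincg is a minimizer supplied with the course materials (used like fminunc, but more efficient when there are many parameters). If it were unavailable, a hypothetical fallback using the built-in fminunc might look like this sketch:

% Fallback sketch (assumption: the built-in fminunc with a user-supplied
% gradient behaves acceptably here; this is not part of the original script)
options = optimset('GradObj', 'on', 'MaxIter', 50);
[nn_params, cost] = fminunc(costFunction, initial_nn_params, options);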

Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
                 hidden_layer_size, (input_layer_size + 1));

Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
                 num_labels, (hidden_layer_size + 1));
% 9. Make predictions

pred = predict(Theta1, Theta2, X);

% Function predict:
function p = predict(Theta1, Theta2, X)
m = size(X, 1);
num_labels = size(Theta2, 1);
p = zeros(size(X, 1), 1);

% Forward-propagate, then take the index of the largest output per row
% as the predicted label (1-10)
h1 = sigmoid([ones(m, 1) X] * Theta1');
h2 = sigmoid([ones(m, 1) h1] * Theta2');
[dummy, p] = max(h2, [], 2);
end

% Print the training set accuracy (output shown below):
fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);

[Figure: training set accuracy printed by the script]
