Aside: Andrew Ng says logistic regression is basically a tiny neural network, just one without any hidden layers.
First, a quick overview of what needs to be done, split into 7 steps; after that, all the code from Andrew Ng's assignment is pasted in.
1. Load the dataset and inspect the data format
Operation: train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
train_set_x_orig.shape
This generally gives four values, (n, num_px, num_px, 3): n is the number of training examples, e.g. 100 cat photos; num_px is the image's height and width, e.g. 64, 64 for a 64×64 image; and 3 stands for the three RGB channels.
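As a quick illustration (a synthetic array is used here, since load_dataset comes from the assignment's lr_utils helper), the shape unpacks like this:

import numpy as np

# Synthetic stand-in for train_set_x_orig: 100 images, 64x64 pixels, 3 RGB channels
train_set_x_orig = np.zeros((100, 64, 64, 3), dtype=np.uint8)

n, num_px, _, channels = train_set_x_orig.shape
print(train_set_x_orig.shape)   # (100, 64, 64, 3)
print(n, num_px, channels)      # 100 64 3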
2. Reshape
Operation: train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
X_flatten = X.reshape(X.shape[0], -1).T: this operation turns an array of shape (a, b, c, d) into a two-dimensional matrix of shape (b*c*d, a).
It reshapes the (n, num_px, num_px, 3) data into (num_px×num_px×3, n), e.g. (12288, n): 12288 rows and n columns. Each column is one image, each image has 12288 features, and n is the total number of images. This way every training example ends up with the same dimensions.
Don't forget to apply the same operation to the test set.
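As a sanity check, here is the reshape trick on a tiny made-up array, confirming that (a, b, c, d) really becomes (b*c*d, a):

import numpy as np

X = np.arange(2 * 4 * 4 * 3).reshape(2, 4, 4, 3)   # (a, b, c, d) = (2, 4, 4, 3)
X_flatten = X.reshape(X.shape[0], -1).T

print(X_flatten.shape)   # (48, 2): b*c*d = 48 rows, one column per example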
3. Preprocessing (normalization)
Operation: train_set_x = train_set_x_flatten/255.
In general, preprocessing involves two steps: centering (subtracting the mean) and standardizing (dividing by the standard deviation). Here, though, each example has 12288 values, and each value is one of the RGB components of one of the 64×64 pixels, so every value lies in the range (0, 255). Simply dividing each value by 255 is therefore enough.
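For comparison, here is a sketch (on made-up pixel values) of the general center-and-standardize recipe next to the simpler divide-by-255 used in this assignment:

import numpy as np

X = np.random.randint(0, 256, size=(12288, 5)).astype(float)   # five fake flattened images

# What this assignment does: rescale every pixel value into [0, 1]
X_rescaled = X / 255.

# The general recipe: center each feature, then divide by its standard deviation
X_standardized = (X - X.mean(axis=1, keepdims=True)) / X.std(axis=1, keepdims=True)

print(X_rescaled.min(), X_rescaled.max())   # both within [0.0, 1.0]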
4. Initialize the parameters
Operation: w = np.zeros((dim, 1))
b = 0
In general there are as many w's as there are input features (rows); here that is 12288.
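A quick shape check (dim = 64 * 64 * 3 = 12288 here, one weight per flattened pixel value):

import numpy as np

dim = 64 * 64 * 3        # one weight per input feature
w = np.zeros((dim, 1))   # column vector of weights
b = 0                    # a single scalar bias
print(w.shape)           # (12288, 1)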
5. Minimize the cost function (using gradient descent)
Reference formulas:
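Restating the standard logistic regression formulas that the code below implements (A, J, dw, db as in the code; a^{(i)} is the activation for example i):

A = \sigma(w^T X + b), \qquad \sigma(z) = \frac{1}{1 + e^{-z}}

J = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log a^{(i)} + (1 - y^{(i)}) \log(1 - a^{(i)}) \right]

\frac{\partial J}{\partial w} = \frac{1}{m} X (A - Y)^T, \qquad \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^{m} (a^{(i)} - y^{(i)})

w := w - \alpha \frac{\partial J}{\partial w}, \qquad b := b - \alpha \frac{\partial J}{\partial b}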
Operation: m = X.shape[1]
A = sigmoid(np.dot(w.T, X) + b) # compute activation; w is a single-column vector with as many rows as X, so it has to be transposed before taking the dot product
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) # compute cost
dw = 1 / m * np.dot(X, (A - Y).T)
db = 1 / m * np.sum(A - Y)
w = w - learning_rate * dw
b = b - learning_rate * db
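To watch these lines act on actual numbers, here is a self-contained toy run (made-up data: 2 features, 3 examples, one gradient step):

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

X = np.array([[1.0, 2.0, -1.0],
              [3.0, 0.5, -3.2]])   # 2 features, 3 examples
Y = np.array([[1, 0, 1]])          # labels
w = np.zeros((2, 1))
b = 0.0
learning_rate = 0.1

m = X.shape[1]
A = sigmoid(np.dot(w.T, X) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X, (A - Y).T)
db = 1 / m * np.sum(A - Y)
w = w - learning_rate * dw
b = b - learning_rate * db

print(cost)   # 0.693... = log(2), the cost when w = 0 and b = 0 (A is 0.5 everywhere)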
6. Vary the learning rate α, plot how the cost evolves under each learning rate, and get the algorithm's accuracy
Operation: learning_rates = [0.01, 0.001, 0.0001]
plt.plot(np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]))
7. Draw conclusions
Define the functions one by one, then define the model and call the functions inside it; plot the cost curves to settle on a suitable learning rate α.
The code
# The complete logistic regression pipeline
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
# 1. Load the data
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# 1. Check the shape of the data
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
# 2. Reshape so everything has a uniform shape
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
# 3. Preprocessing: standardize the data
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
# 4. Define the sigmoid function, its derivative, parameter initialization, forward propagation, gradient descent, etc.
def sigmoid(z):
    s = 1 / (1 + np.exp(-z))
    return s

def sigmoid_derivative(x):
    s = sigmoid(x)
    ds = s * (1 - s)
    return ds

def initialize_with_zeros(dim):
    w = np.zeros((dim, 1))
    b = 0
    return w, b
def propagate(w, b, X, Y):
    m = X.shape[1]
    A = sigmoid(np.dot(w.T, X) + b)  # compute activation
    cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
    dw = 1 / m * np.dot(X, (A - Y).T)
    db = 1 / m * np.sum(A - Y)
    cost = np.squeeze(cost)
    grads = {"dw": dw,
             "db": db}
    return grads, cost
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    # Update w and b via gradient descent
    costs = []
    for i in range(num_iterations):
        # Cost and gradient calculation
        grads, cost = propagate(w, b, X, Y)
        # Retrieve derivatives from grads
        dw = grads["dw"]
        db = grads["db"]
        w = w - learning_rate * dw
        b = b - learning_rate * db
        # Record the cost every 100 iterations
        if i % 100 == 0:
            costs.append(cost)
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costs
# Define the prediction function (used below to measure the algorithm's accuracy)
def predict(w, b, X):
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        # Convert probabilities A[0,i] to actual predictions p[0,i]
        if A[0, i] <= 0.5:
            Y_prediction[0, i] = 0
        else:
            Y_prediction[0, i] = 1
    return Y_prediction
# 5. Combine all the functions above into a single model that implements logistic regression
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    # print_cost -- Set to true to print the cost every 100 iterations
    D = {}
    w, b = initialize_with_zeros(X_train.shape[0])
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    w = parameters["w"]
    b = parameters["b"]
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    D = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}
    return D
# 5. Call the model function to solve for the whole series of values
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# 6. Visualization: plot the cost curve for learning rate 0.005
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
# Switch between different learning rates and watch how the cost changes under each
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
    print("learning rate is: " + str(i))
    models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
    print('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
    plt.plot(np.squeeze(models[str(i)]["costs"]), label=str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
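One last thing: PIL and scipy are imported at the top but never used above; in the assignment they are there so you can test the model on a picture of your own. A minimal sketch, assuming a hypothetical file my_image.jpg in the working directory and the d returned by model:

from PIL import Image

fname = "my_image.jpg"   # hypothetical file name: drop in your own picture
num_px = 64

# Resize to 64x64, then flatten and rescale exactly like the training set
image = Image.open(fname).convert("RGB").resize((num_px, num_px))
my_image = np.array(image).reshape((1, num_px * num_px * 3)).T / 255.

my_predicted_image = predict(d["w"], d["b"], my_image)
print("y = " + str(np.squeeze(my_predicted_image)))   # 1 = cat, 0 = non-cat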