# 實現只有三層（輸入層、隱藏層、輸出層）的神經網絡
import numpy as np
#定義sigmoid函數
def sigmoid(x, deriv=False):
    """Sigmoid activation, or its derivative.

    Args:
        x: input array (or scalar). When ``deriv=True``, ``x`` MUST already
           be a sigmoid *output* s = sigmoid(z); the derivative is then
           computed as s * (1 - s) without re-applying the sigmoid.
        deriv: if True, return the derivative evaluated at the pre-computed
           sigmoid output ``x``; otherwise return sigmoid(x).

    Returns:
        sigmoid(x), or x * (1 - x) when deriv is True.
    """
    if deriv:
        # x is assumed to be a sigmoid output already (see docstring).
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
# Training inputs: 5 samples with 3 features each (last column is a constant
# bias-like input of 1).
x = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1],
              [0, 0, 1]])

# Target label for each sample.
y = np.array([[0], [1], [1], [0], [0]])

# Fixed seed so the weight initialisation below is reproducible.
np.random.seed(1)

# Weights drawn uniformly from (-1, 1).
w0 = 2 * np.random.random((3, 4)) - 1  # input (3 features) -> hidden (4 units)
w1 = 2 * np.random.random((4, 1)) - 1  # hidden (4 units) -> output (1 unit)
# Training loop: full-batch gradient descent with an implicit learning
# rate of 1 (no explicit step-size factor on the weight updates).
for j in range(60000):
    # Forward pass through the three layers.
    l0 = x
    l1 = sigmoid(np.dot(l0, w0))
    l2 = sigmoid(np.dot(l1, w1))

    # Raw output residual. NOTE: despite the original comment, this is not
    # the mean squared error itself — (y - l2) is the negative gradient of
    # 1/2 * squared error w.r.t. l2, which is what backprop needs below.
    l2_error = y - l2

    # Periodically report the mean absolute error.
    if j % 5000 == 0:
        print('Error' + str(np.mean(np.abs(l2_error))))

    # Backpropagation: scale the error by the sigmoid derivative.
    # sigmoid(v, deriv=True) expects v to already be a sigmoid output.
    l2_delta = l2_error * sigmoid(l2, deriv=True)
    # How much each hidden unit contributed to the output error.
    l1_error = l2_delta.dot(w1.T)
    l1_delta = l1_error * sigmoid(l1, deriv=True)

    # Weight updates (the minus sign of gradient descent is already folded
    # into l2_error = y - l2, hence += rather than -=).
    w1 += l1.T.dot(l2_delta)
    w0 += l0.T.dot(l1_delta)
下面是輸出結果:
Error0.491873939736
Error0.0151236636586
Error0.00979428446397
Error0.00768391123459
Error0.00649307653844
Error0.00570862063183
Error0.00514415767838
Error0.00471401703819
Error0.0043727491817
Error0.00409377051499
Error0.00386038674497
Error0.00366152984108