单层感知机
多重感知机
BP神经网络
注意
自定义BP代码
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic (sigmoid) activation: 1 / (1 + e^-x), mapping reals into (0, 1)."""
    z = np.exp(-x)
    return 1 / (1 + z)
def bpnet(X=None,Y=None,maxiter=200,yita=0.75,hidSize=5):
    """Train a single-hidden-layer BP (back-propagation) network with online updates.

    Parameters
    ----------
    X : ndarray of shape (n, m) -- training inputs, one sample per row.
    Y : ndarray of shape (n,) -- target values (sigmoid output unit, so
        presumably targets lie in (0, 1) -- TODO confirm with caller).
    maxiter : int -- number of full passes (epochs) over the training set.
    yita : float -- learning rate (eta).
    hidSize : int -- number of hidden-layer neurons.

    Returns
    -------
    (w_mid, w_out) : hidden-layer weights of shape (m+1, hidSize) and
        output-layer weights of shape (hidSize+1,).  The extra row/slot is
        the bias weight; the bias input is the constant -1.

    Side effect: shows a matplotlib plot of the mean absolute error per epoch.
    """
    n,m = X.shape
    net_in = -np.ones([m+1])  # network input vector; last slot stays -1 (bias input)
    out_in = -np.ones([hidSize+1])  # hidden outputs = output-layer inputs; last slot stays -1 (bias)
    w_mid = np.random.rand(m+1,hidSize)  # initial hidden-layer weights, uniform in [0, 1)
    w_out = np.random.rand(hidSize+1)  # initial output-layer weights
    delta_w_mid = np.zeros([m+1,hidSize])  # hidden-layer weight corrections
    Error = np.zeros([maxiter])  # mean absolute error per epoch
    for it in range(maxiter):
        error = np.zeros([n])
        for j in range(n):
            net_in[:m] = X[j,:]  # load current sample into the network input
            real = Y[j]  # corresponding target value
            # === forward pass: input -> hidden-layer outputs
            for i in range(hidSize):
                out_in[i] = sigmoid(sum(net_in*w_mid[:,i]))
            res = sigmoid(sum(out_in*w_out))  # hidden layer -> network output
            # === output-layer weight correction (delta rule)
            delta_w_out = yita*res*(1-res)*(real-res)*out_in
            # Bias slot: since out_in[hidSize] == -1, this equals the value the
            # line above already produced; kept to mirror the textbook derivation.
            delta_w_out[hidSize] = -yita*res*(1-res)*(real-res)
            w_out = w_out + delta_w_out
            # === hidden-layer weight correction
            # NOTE: uses the already-updated w_out from this same sample.
            for i in range(hidSize):
                delta_w_mid[:,i] = yita*out_in[i]*(1-out_in[i])*w_out[i]*res*(1-res)*(real-res)*net_in
                # Bias slot: equals row m of the line above since net_in[m] == -1.
                delta_w_mid[m,i] = -yita*out_in[i]*(1-out_in[i])*w_out[i]*res*(1-res)*(real-res)
            w_mid = w_mid + delta_w_mid
            error[j] = abs(real-res)
        Error[it] = error.mean()
    plt.plot(Error)  # training curve: mean |error| per epoch
    plt.show()  # NOTE: blocks until the figure window is closed
    return w_mid,w_out
def bpnet_predict(X=None,Y=None,hidSize=5,w_mid=None,w_out=None):
    """Run a trained BP network (weights from ``bpnet``) on every row of X.

    Bug fix vs. the original: the original overwrote ``res`` each iteration
    and returned only the LAST sample's prediction; this version collects and
    returns all n predictions.  Also, ``Y`` is now genuinely optional (it is
    only used for the diagnostic print), matching its declared default of
    None -- the original crashed on ``Y[j]`` when Y was omitted.

    Parameters
    ----------
    X : ndarray of shape (n, m) -- input samples, one per row.
    Y : ndarray of shape (n,) or None -- optional true values; when given,
        each prediction is printed next to its true value.
    hidSize : int -- hidden-layer size used when training.
    w_mid : ndarray of shape (m+1, hidSize) -- hidden-layer weights.
    w_out : ndarray of shape (hidSize+1,) -- output-layer weights.

    Returns
    -------
    ndarray of shape (n,) -- the network output for every sample.
    """
    n,m = X.shape
    net_in = -np.ones([m+1])  # network input; last slot stays -1 (bias input)
    out_in = -np.ones([hidSize+1])  # hidden outputs; last slot stays -1 (bias)
    res = np.zeros([n])  # one prediction per sample
    for j in range(n):
        net_in[:m] = X[j,:]  # load current sample into the network input
        # forward pass: input -> hidden-layer outputs
        for i in range(hidSize):
            out_in[i] = sigmoid(sum(net_in*w_mid[:,i]))
        res[j] = sigmoid(sum(out_in*w_out))  # hidden layer -> network output
        if Y is not None:
            print('real: ',Y[j],' res: ',res[j])
    return res
Sklearn中的MLP
class sklearn.neural_network.MLPClassifier(hidden_layer_sizes=(100,), activation='relu',
solver='adam', alpha=0.0001, batch_size='auto', learning_rate='constant', learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False,
warm_start=False, momentum=0.9, nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08, n_iter_no_change=10)
重要参数hidden_layer_sizes
| 重要参数 | 意义 |
| --- | --- |
| hidden_layer_sizes | 元组,长度 = n_layers - 2,默认值 (100,)。元组中包含多少个元素,就表示设定多少个隐藏层;元组中的第 i 个元素表示第 i 个隐藏层中的神经元数量 |
例子验证
import numpy as np
from sklearn.neural_network import MLPClassifier as DNN
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score as cv
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.model_selection import train_test_split as TTS
from time import time
import datetime
# Benchmark sklearn's MLPClassifier on the breast-cancer dataset and probe how
# width (neurons per layer) and depth (number of hidden layers) affect accuracy.
data = load_breast_cancer()
X = data.data
y = data.target
Xtrain, Xtest, Ytrain, Ytest = TTS(X, y, test_size=0.3, random_state=420)

# --- baseline: 5-fold CV accuracy and wall time for the MLP ---
times = time()
dnn = DNN(hidden_layer_sizes=(100,), random_state=420)
print(cv(dnn, X, y, cv=5).mean())
print(time() - times)

# --- comparison: a decision tree on the same data ---
times = time()
clf = DTC(random_state=420)
print(cv(clf, X, y, cv=5).mean())
print(time() - times)

# Hold-out score for the default single 100-neuron hidden layer.
dnn = DNN(hidden_layer_sizes=(100,), random_state=420).fit(Xtrain, Ytrain)
dnn.score(Xtest, Ytest)
# n_layers_ counts input + hidden + output layers; 3 means one hidden layer,
# since the input and output layers are always present.
dnn.n_layers_

# What happens if we double the width of that single hidden layer?
dnn = DNN(hidden_layer_sizes=(200,), random_state=420)
dnn = dnn.fit(Xtrain, Ytrain)
dnn.score(Xtest, Ytest)

# --- learning curve over hidden-layer width 100..1900 ---
s = []
for width in range(100, 2000, 100):
    dnn = DNN(hidden_layer_sizes=(int(width),), random_state=420).fit(Xtrain, Ytrain)
    s.append(dnn.score(Xtest, Ytest))
    print(width, max(s))  # running best score so far
plt.figure(figsize=(20, 5))
plt.plot(range(100, 2000, 100), s)
plt.show()

# --- add depth while holding each layer at 100 neurons ---
s = []
layers = [(100,), (100, 100), (100, 100, 100), (100, 100, 100, 100),
          (100, 100, 100, 100, 100), (100, 100, 100, 100, 100, 100)]
for shape in layers:
    dnn = DNN(hidden_layer_sizes=shape, random_state=420).fit(Xtrain, Ytrain)
    s.append(dnn.score(Xtest, Ytest))
    print(shape, max(s))  # running best score so far
plt.figure(figsize=(20, 5))
plt.plot(range(3, 9), s)
plt.xticks([3, 4, 5, 6, 7, 8])
plt.xlabel("Total number of layers")
plt.show()

# --- grow depth and width together ---
s = []
layers = [(100,), (150, 150), (200, 200, 200), (300, 300, 300, 300)]
for shape in layers:
    dnn = DNN(hidden_layer_sizes=shape, random_state=420).fit(Xtrain, Ytrain)
    s.append(dnn.score(Xtest, Ytest))
    print(shape, max(s))  # running best score so far
plt.figure(figsize=(20, 5))
plt.plot(range(3, 7), s)
plt.xticks([3, 4, 5, 6])
plt.xlabel("Total number of layers")
plt.show()