Keras 波士頓房價預測（迴歸）

波士頓房價數據集是 Keras 的內置數據集,可以直接下載使用。

from keras.datasets import boston_housing
import numpy as np
from keras import models
from keras import layers
# 404 training samples, 102 test samples, 13 features each
(train_data,train_targets),(test_data,test_targets)=boston_housing.load_data()
# Standardize each feature: subtract the mean, then divide by the standard
# deviation. axis=0 computes the statistics per column (per feature).
mean=train_data.mean(axis=0)
train_data-=mean
std=train_data.std(axis=0)
train_data/=std
# The test set is normalized with the *training-set* statistics — test data
# must never leak into the preprocessing parameters.
test_data-=mean
test_data/=std
def build_model():
	"""Build and compile a small two-hidden-layer regression network.

	Reads the module-level ``train_data`` to size the input layer.
	Returns a compiled Keras ``Sequential`` model.
	"""
	net = models.Sequential()
	# Two ReLU hidden layers of width 64.
	net.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],)))
	net.add(layers.Dense(64, activation='relu'))
	# Linear output: a regression head must not constrain its range
	# with an activation function.
	net.add(layers.Dense(1))
	# MSE is the usual regression loss; MAE is tracked as a readable metric.
	net.compile(loss='mse', optimizer='rmsprop', metrics=['mae'])
	return net

# K-fold cross-validation: when the training set is too small to spare a
# fixed validation split, divide it into k equal parts and validate on each
# part in turn (k is typically 4-5).
k=4
# // is integer division; / is float division
each_samples=len(train_data)//k
num_epochs=100
all_scores=[]
for fold in range(k):
	print('processing fold #',fold)
	lo = fold*each_samples
	hi = (fold+1)*each_samples
	# Validation slice for this fold.
	val_data = train_data[lo:hi]
	val_targets = train_targets[lo:hi]
	# Training data is everything outside the [lo, hi) slice, concatenated.
	part_train_data = np.concatenate([train_data[:lo], train_data[hi:]], axis=0)
	part_train_targets = np.concatenate([train_targets[:lo], train_targets[hi:]], axis=0)
	model=build_model()
	# verbose: 0 = silent, 1 = progress bar, 2 = one log line per epoch (default 1)
	model.fit(part_train_data, part_train_targets, epochs=num_epochs, batch_size=1, verbose=0)
	val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
	all_scores.append(val_mae)

# Mean validation MAE across folds (observed ~2.3954).
print(np.mean(all_scores))

# Train longer (500 epochs) to locate the overfitting point; in practice
# validation MAE starts degrading around epoch ~80. Slow with batch_size=1.
num_epochs=500
all_mae_history=[]
for i in range(k):
	print('processing fold #',i)
	# Validation slice for this fold.
	val_data=train_data[i*each_samples:(i+1)*each_samples]
	val_targets=train_targets[i*each_samples:(i+1)*each_samples]
	# Training data: everything outside the validation slice, concatenated.
	part_train_data=np.concatenate([train_data[:i*each_samples],train_data[(i+1)*each_samples:]],axis=0)
	part_train_targets=np.concatenate([train_targets[:i*each_samples],train_targets[(i+1)*each_samples:]],axis=0)
	model=build_model()
	# verbose: 0 = silent, 1 = progress bar, 2 = one log line per epoch (default 1)
	history=model.fit(part_train_data,part_train_targets,epochs=num_epochs,batch_size=1,validation_data=(val_data,val_targets),verbose=0)
	# BUG FIX: the history key for the MAE metric changed across Keras
	# versions — older releases record 'val_mean_absolute_error', newer ones
	# record 'val_mae' (matching metrics=['mae'] in compile). Hard-coding the
	# old name raises KeyError on modern Keras; use whichever key exists.
	mae_key='val_mae' if 'val_mae' in history.history else 'val_mean_absolute_error'
	mae_history=history.history[mae_key]
	all_mae_history.append(mae_history)
	test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
	# Observed per-fold test MAE 2.46-2.92 (mean ~2.754).
	print('test_mae_score:',test_mae_score)

# Average validation MAE per epoch, across all k folds.
avg_mae_history=[np.mean([x[i] for x in all_mae_history]) for i in range(num_epochs)]
# print(avg_mae_history)
# print(avg_mae_history)

# Final model: retrain on the full training set for ~80 epochs — the point
# where validation MAE began to degrade in the experiment above.
num_epochs=80
model=build_model()
# Train on all training data; larger batch size (16) for speed.
model.fit(train_data,train_targets,epochs=num_epochs,batch_size=16,verbose=0)
mse,mae=model.evaluate(test_data,test_targets)
# Observed result: mse ~15.6876, mae ~2.5485
print(mse,mae)

驗證 MAE 的結果如上所述。由於數據量很少,這裏採用了 K-fold cross-validation——這是一種在數據很少時非常有用的技術。

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章