# Modules used
import numpy
import matplotlib.pyplot as plt
from pandas import read_csv
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.metrics import mean_squared_error
from keras.callbacks import ReduceLROnPlateau
from sklearn.preprocessing import MinMaxScaler
# Load the data
# Read the target series: column 6 of china.csv, skipping the first 5 rows
# (presumably header/metadata rows — confirm against the CSV layout).
dataframe = read_csv('china.csv',encoding='utf-8' ,usecols=[6],skiprows=5)
dataset = dataframe.values
plt.plot(dataset)# quick visual check of the trend
plt.show()
m = numpy.argmax(dataset)# position of the (assumed single) outlier = the series maximum
# Replace the outlier with the mean of its two neighbours.
# NOTE(review): if m is 0 this wraps to the last element, and if m is the last
# index m+1 raises IndexError — assumes the outlier is interior; confirm.
dataset[m] = 0.5*(dataset[m-1]+dataset[m+1])#平均插值 -> neighbour-mean interpolation
# Keep a reference to the outlier-corrected, unscaled data for plotting later
# (safe because fit_transform below returns a NEW array and rebinds `dataset`).
dataset_or = dataset
# Helper that turns the series into supervised-learning pairs for the LSTM input
def create_dataset(dataset, look_back=1):
    """Turn a (n, 1) series into supervised-learning pairs.

    `look_back` is the sliding-window width.  Returns (X, y) where X[i]
    holds the `look_back` consecutive values starting at index i, and
    y[i] is the single value immediately after that window.
    """
    n_samples = len(dataset) - look_back
    windows = [dataset[j:j + look_back, 0] for j in range(n_samples)]
    targets = [dataset[j + look_back, 0] for j in range(n_samples)]
    return numpy.array(windows), numpy.array(targets)
# Set the random seed and normalize the data
numpy.random.seed(7)  # fixed seed for reproducibility
scaler = MinMaxScaler(feature_range=(0, 1))  # scale the series into [0, 1]
dataset = scaler.fit_transform(dataset)  # returns a new array; dataset_or still holds the unscaled values
train = dataset  # the whole series is used for training (no hold-out split)
# Set the sliding time window and build the training set
look_back = 7  # sliding-window width: predict each day from the previous 7 days
trainX, trainY = create_dataset(train, look_back)
# Reshape training X to the (samples, time steps, features) layout Keras expects
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))  # -> (samples, timesteps=1, features=look_back)
# Build the LSTM network
# Small LSTM regressor: one LSTM layer feeding a single linear output unit.
model = Sequential()
model.add(LSTM(25, input_shape=(1, look_back)))  # 25 units; each sample is 1 timestep of look_back features
model.add(Dense(1))  # fully-connected output: the next day's (scaled) value
model.compile(loss='mean_squared_error', optimizer='adam')  # MSE loss, Adam optimizer
# BUGFIX: fit() below is called without validation data, so 'val_loss' never
# exists and the original callback (monitor='val_loss') could never fire.
# Also a loss must be *minimized*, so mode must be 'min', not 'max'.
reduce_lr = ReduceLROnPlateau(monitor='loss', patience=10, mode='min')
# Train: 100 epochs, batch size 1, one log line per epoch, LR reduced on plateau.
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2, callbacks=[reduce_lr])
# Prediction
trainPredict = model.predict(trainX)  # in-sample predictions on the training set (scaled space)
# Forecast the next 7 days
# Iterative 7-day forecast: each prediction is appended to the window buffer
# and fed back as input for the next step.
testx = numpy.zeros(7 + look_back)          # flat float buffer instead of a mixed list -> object array
testx[:look_back] = train[-look_back:, 0]   # seed with the last observed (scaled) window
testPredict = numpy.zeros((7, 1))
for i in range(7):
    # BUGFIX: slide the window forward with testx[i:i+look_back].  The original
    # read testx[-look_back:], i.e. the still-zero tail of the fixed-length
    # buffer, so every step predicted from (mostly) zeros rather than from the
    # observed data and earlier predictions.
    window = numpy.reshape(testx[i:i + look_back], (1, 1, look_back))
    pred = model.predict(window)            # shape (1, 1)
    testx[look_back + i] = pred[0, 0]       # feed the prediction back into the buffer
    testPredict[i, 0] = pred[0, 0]
# Invert the normalization
# Map all predictions and targets back to the original scale.
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])  # trainY is 1-D; wrap it to (1, n) as the scaler requires
testPredict = scaler.inverse_transform(testPredict)
# Report the training RMSE
trainScore = math.sqrt(mean_squared_error(trainY[0,:], trainPredict[:,0]))  # RMSE on training data, original scale
print('Train Score: %.2f RMSE' % (trainScore))
# Plot the model's predictions against the true series
# Plot truth, in-sample fit, and the 7-day forecast on one shared time axis.
# Use float NaN arrays (numpy.full) instead of object-dtype arrays of None:
# matplotlib skips NaNs cleanly, and this matches the numpy.nan fill the
# original already used for the test plot while keeping a fast numeric dtype.
total_len = len(dataset) + 7
trainPredictPlot = numpy.full((total_len, 1), numpy.nan)
trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict  # fit starts after the first window
testPredictPlot = numpy.full((total_len, 1), numpy.nan)
testPredictPlot[len(dataset):total_len, :] = testPredict  # forecast occupies the 7 appended slots
plt.plot(dataset_or,label='true')
plt.plot(trainPredictPlot,label='trainpredict')
plt.plot(testPredictPlot,label='testpredict')
plt.legend()
plt.show()
# Note: overall the results are mediocre, and this little data is really not
# suited to an LSTM.  I only used one because Academician Zhong Nanshan's paper
# used an LSTM for this kind of forecast — though his team pre-trained it on
# SARS data first.