Univariate LSTM Forecasting Model (4)
Steps Overview
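1. Load the shampoo sales data and parse the date column.
2. Difference the series to remove the trend and make it stationary.
3. Convert the differenced series into a supervised learning problem with a lag of 1.
4. Split the data, holding out the last 12 months as the test set.
5. Scale inputs and outputs to [-1, 1] with a scaler fit on the training data only.
6. Fit a stateful LSTM (batch size 1, 3000 epochs, 4 neurons).
7. Walk forward through the test set making one-step forecasts, inverting the scaling and the differencing for each forecast.
8. Report the test RMSE and plot the forecasts against the observed values.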
Code Walkthrough
# coding=utf-8
from datetime import datetime
from pandas import read_csv
from pandas import concat
from pandas import DataFrame
from pandas import Series
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
from matplotlib import pyplot
import numpy
# Date parser used when loading the time column
def parser(x):
    return datetime.strptime(x, '%Y/%m/%d')
# Convert the time series into a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    df = DataFrame(data)
    # Shift the series by 1..lag steps to form the input columns; the original df is the output
    columns = [df.shift(i) for i in range(1, lag+1)]
    columns.append(df)
    df = concat(columns, axis=1)
    df.fillna(0, inplace=True)
    return df
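# For example (illustration only, not part of the original script): with lag=1,
# timeseries_to_supervised([1, 2, 3, 4]) returns two columns, the shifted input
# [0, 1, 2, 3] (the leading NaN filled with 0) and the original output [1, 2, 3, 4].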
# Difference the series to remove the trend and make it stationary
def difference(dataset, interval=1):
    diff = list()
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return Series(diff)
# Invert the differencing: history is the raw series, yhat the predicted change, interval the differencing offset
def inverse_difference(history, yhat, interval=1):
    return yhat + history[-interval]
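# For example (illustration only): difference([10, 12, 15], 1) returns [2, 3], and
# inverse_difference([10, 12, 15], 3, 1) returns 3 + 15 = 18, i.e. the predicted
# change added back onto the last observed value.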
# Scale train and test data to [-1, 1]
def scale(train, test):
    # Fit the scaler on the training data only, so no test information leaks in
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # Transform the training data
    train = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train)
    # Transform the test data
    test = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled
# Invert the scaling for a single forecast value
def invert_scale(scaler, X, value):
    new_row = [x for x in X] + [value]
    array = numpy.array(new_row)
    array = array.reshape(1, len(array))
    inverted = scaler.inverse_transform(array)
    return inverted[0, -1]
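# Note (added explanation): the scaler was fit on rows of [input, output], so the
# forecast value is appended to the input row X before inverse_transform, and only
# the last column of the inverted row is returned.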
# Fit an LSTM model to the training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
    X, y = train[:, 0:-1], train[:, -1]
    # Reshape the input to the 3D form [samples, timesteps, features]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    # Add a stateful LSTM layer
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))  # output layer with a single node
    # Compile with MSE loss and the Adam optimizer
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch):
        # Train for one epoch at a time, reading batch_size samples per step without shuffling
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
        model.reset_states()
    return model
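# Note (added explanation): because the LSTM is stateful, Keras does not reset its
# internal state between batches; the epochs are therefore run one at a time so the
# state can be cleared manually with reset_states() at the end of each pass.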
# Make a one-step forecast
def forecast_lstm(model, batch_size, X):
    X = X.reshape(1, 1, len(X))
    yhat = model.predict(X, batch_size=batch_size)
    return yhat[0, 0]
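# Note (added explanation): the single input is reshaped to [samples=1, timesteps=1,
# features=len(X)] to match the 3D input shape the LSTM layer expects.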
# Load the data
series = read_csv('data_set/shampoo-sales.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# Make the data stationary
raw_values = series.values
diff_values = difference(raw_values, 1)
# Convert to supervised learning data
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# Split into training and test data (the last 12 months are held out for testing)
train, test = supervised_values[0:-12], supervised_values[-12:]
# Scale the data
scaler, train_scaled, test_scaled = scale(train, test)
# Fit the model: training data, batch_size, number of epochs, number of neurons
lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
# Forecast the training data
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)
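# Note (added explanation): this prediction over the training set is not used for
# evaluation; with a stateful LSTM it builds up the internal state before the
# walk-forward forecasts on the test data begin.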
# Walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
    # One-step forecast
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = forecast_lstm(lstm_model, 1, X)
    # Invert the scaling
    yhat = invert_scale(scaler, X, yhat)
    # Invert the differencing; the interval selects the raw observation immediately before the forecast month
    yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
    predictions.append(yhat)
    expected = raw_values[len(train) + i + 1]
    print('Month=%d, Predicted=%f, Expected=%f' % (i + 1, yhat, expected))
# Report performance
rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
print('Test RMSE: %.3f' % rmse)
# Plot the results
pyplot.plot(raw_values[-12:])
pyplot.plot(predictions)
pyplot.show()
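The plot overlays the one-step forecasts on the last 12 months of observed sales, and the printed RMSE summarizes the error over the same 12-month test period.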