Time Series Decomposition Model: Neural Decomposition

Paper and Code

Original paper: L. B. Godfrey and M. S. Gashler, "Neural Decomposition of Time-Series Data for Effective Generalization"
Code: https://github.com/trokas/neural_decomposition


Model

The time series is decomposed into a periodic part and an aperiodic part $g(t)$:

$$x(t) = \sum_{k=1}^{N} a_k \sin(w_k t + \phi_k) + g(t)$$

where $a_k$, $w_k$, $\phi_k$, and $g(t)$ are all learned from the data.

The network structure is:

[Figure: network architecture of the neural decomposition model]

Here $g(t)$ can be understood as the trend component of the series; it mainly covers a linear trend $wt + b$ plus sigmoid and softplus units.
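
To make the functional form concrete, here is a minimal NumPy sketch (my own addition, with hand-picked parameters; in the model every one of them is learned from data):

import numpy as np

# Hand-picked parameters for illustration only; the model learns them.
t = np.linspace(0, 1, 100)
a   = np.array([1.0, 0.5])           # amplitudes a_k
w   = np.array([4*np.pi, 8*np.pi])   # frequencies w_k
phi = np.array([0.0, np.pi/2])       # phases phi_k

# Periodic part: sum of N sinusoids
periodic = (a[:, None] * np.sin(w[:, None] * t + phi[:, None])).sum(axis=0)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def softplus(z):
    return np.log1p(np.exp(z))

# g(t): linear trend plus one sigmoid and one softplus unit (weights invented here)
g = 5*t + 0.3*sigmoid(10*(t - 0.5)) + 0.1*softplus(2*t)

x = periodic + g   # the full decomposition x(t)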

This model immediately brings Prophet to mind, and it is arguably a simplified Prophet, since Prophet at least models a piecewise-linear trend as well as holiday effects. The advantage of this model, however, is that it can tune the frequencies from the data, whereas Prophet only selects frequencies from a pre-specified spectrum.
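
To make the contrast concrete, a hedged sketch of the Prophet side (the period and Fourier order below are illustrative, not from the original post): each seasonality's fourier_order fixes the frequency set before fitting, and only the Fourier coefficients are learned.

from fbprophet import Prophet

# Prophet's seasonality uses a fixed Fourier basis: fourier_order=K pins the
# frequencies 2*pi*k/period for k = 1..K in advance; only their coefficients
# are fitted. K=5 and period=30.5 here are just example values.
m = Prophet(yearly_seasonality=False, weekly_seasonality=False, daily_seasonality=False)
m.add_seasonality(name='monthly', period=30.5, fourier_order=5)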

Code

See the GitHub repo above; implementing this in Keras turns out to be almost trivial.

model

%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, Concatenate
from keras import regularizers

plt.rcParams['figure.figsize'] = [12.0, 8.0]


def create_model(n, units=10, noise=0.001):
    """
    Constructs the neural decomposition model and returns it.
    n     -- number of sinusoid units (one cos/sin pair per frequency)
    units -- number of units per trend activation (linear, softplus, sigmoid)
    noise -- scale of the random initialization
    """
    data = Input(shape=(1,), name='time')
    # sin will not work on TensorFlow backend, use Theano instead
    sinusoid = Dense(n, activation=np.sin, name='sin')(data)
    linear = Dense(units, activation='linear', name='linear')(data)
    softplus = Dense(units, activation='softplus', name='softplus')(data)
    sigmoid = Dense(units, activation='sigmoid', name='sigmoid')(data)
    combined = Concatenate(name='combined')([sinusoid, linear, softplus, sigmoid])
    # L1 on the output layer encourages a sparse mix of basis functions
    out = Dense(1, kernel_regularizer=regularizers.l1(0.01), name='output')(combined)

    model = Model(inputs=[data], outputs=[out])
    model.compile(loss="mse", optimizer="adam")

    # DFT-like initialization (set_value requires the Theano backend):
    # frequencies come in pairs w_k = 2*pi*floor(k/2), and the phases alternate
    # pi/2 and pi, so each pair starts out as cos(2*pi*m*t) and -sin(2*pi*m*t),
    # i.e. a Fourier basis on [0, 1].
    model.weights[0].set_value((2*np.pi*np.floor(np.arange(n)/2))[np.newaxis,:].astype('float32'))
    model.weights[1].set_value((np.pi/2+np.arange(n)%2*np.pi/2).astype('float32'))
    # Linear units start near the identity; all other trend units start near zero
    model.weights[2].set_value((np.ones(shape=(1,units)) + np.random.normal(size=(1,units))*noise).astype('float32'))
    model.weights[3].set_value((np.random.normal(size=(units))*noise).astype('float32'))
    model.weights[4].set_value((np.random.normal(size=(1,units))*noise).astype('float32'))
    model.weights[5].set_value((np.random.normal(size=(units))*noise).astype('float32'))
    model.weights[6].set_value((np.random.normal(size=(1,units))*noise).astype('float32'))
    model.weights[7].set_value((np.random.normal(size=(units))*noise).astype('float32'))
    model.weights[8].set_value((np.random.normal(size=(n+3*units,1))*noise).astype('float32'))
    model.weights[9].set_value((np.random.normal(size=(1))*noise).astype('float32'))

    return model

Test series

t = np.linspace(0, 1, 100)
# 4.25*pi and 8.5*pi are not integer multiples of 2*pi, so the DFT-initialized
# frequencies must shift during training to fit the data
X = np.sin(4.25*np.pi*t) + np.sin(8.5*np.pi*t) + 5*t
plt.plot(X)

[Figure: plot of the test series]

Training

%%time
model = create_model(len(X))
hist = model.fit(t, X, epochs=3000, verbose=0)
plt.plot(hist.history['loss'])

Wall time: 15.8 s
[Figure: training loss curve]

Prediction

# Predict over t in [0, 2]: the second half extrapolates beyond the training range
prediction = model.predict(np.linspace(0, 2, 200)).flatten()

plt.plot(prediction, color='blue')
plt.plot(X, color='red')

[Figure: extrapolated prediction (blue) vs. training series (red)]
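
Because the layers were given names, we can also inspect the decomposition itself. A minimal sketch (my own addition, not from the repo) that splits the fitted curve into its periodic part and its trend part $g(t)$ by slicing the output layer's weights:

# 'combined' stacks [sin | linear | softplus | sigmoid], so the first n
# columns of the output weights belong to the sinusoids.
tt = np.linspace(0, 2, 200)
basis = Model(inputs=model.input, outputs=model.get_layer('combined').output)
H = basis.predict(tt)                  # hidden features, shape (200, n + 3*units)
W, b = model.get_layer('output').get_weights()
n = len(X)
periodic = H[:, :n] @ W[:n, 0]         # sum of learned sinusoids
trend = H[:, n:] @ W[n:, 0] + b[0]     # g(t): linear + softplus + sigmoid units
plt.plot(tt, periodic, label='periodic')
plt.plot(tt, trend, label='g(t)')
plt.legend()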

Adding some noise

t = np.linspace(0, 1, 100)
X = np.sin(4.25*np.pi*t) + np.sin(8.5*np.pi*t) + 5*t + np.random.uniform(size=100)
model = create_model(len(X))
hist = model.fit(t, X, epochs=3000, verbose=0)
prediction = model.predict(np.linspace(0, 2, 200)).flatten()
plt.plot(prediction, color='blue')
plt.plot(X, color='red')

[Figure: prediction on the noisy series]

PyTorch implementation

import numpy as np
import torch
import torch.nn as nn

class ND(nn.Module):
    def __init__(self, n, units=10, noise=0.001):
        super(ND, self).__init__()
        self.wave = nn.Linear(1, n)             # sinusoid frequencies and phases
        self.unit_linear = nn.Linear(1, units)  # trend units
        self.unit_softplus = nn.Linear(1, units)
        self.unit_sigmoid = nn.Linear(1, units)
        self.fc = nn.Linear(n + 3*units, 1)     # output mixing layer
        # Initialize weights with the same DFT-like scheme as the Keras version
        params = dict(self.named_parameters())
        params['wave.weight'].data = torch.from_numpy((2*np.pi*np.floor(np.arange(n)/2))[:,np.newaxis]).float()
        params['wave.bias'].data = torch.from_numpy(np.pi/2+np.arange(n)%2*np.pi/2).float()
        params['unit_linear.weight'].data = torch.from_numpy(np.ones(shape=(units,1)) + np.random.normal(size=(units,1))*noise).float()
        params['unit_linear.bias'].data = torch.from_numpy(np.random.normal(size=(units))*noise).float()
        params['unit_softplus.weight'].data = torch.from_numpy(np.random.normal(size=(units,1))*noise).float()
        params['unit_softplus.bias'].data = torch.from_numpy(np.random.normal(size=(units))*noise).float()
        params['unit_sigmoid.weight'].data = torch.from_numpy(np.random.normal(size=(units,1))*noise).float()
        params['unit_sigmoid.bias'].data = torch.from_numpy(np.random.normal(size=(units))*noise).float()
        params['fc.weight'].data = torch.from_numpy(np.random.normal(size=(1,n+3*units))*noise).float()
        params['fc.bias'].data = torch.from_numpy(np.random.normal(size=(1))*noise).float()

    def forward(self, x):
        sinusoid = torch.sin(self.wave(x))
        linear = self.unit_linear(x)
        softplus = nn.Softplus()(self.unit_softplus(x))
        sigmoid = nn.Sigmoid()(self.unit_sigmoid(x))
        combined = torch.cat([sinusoid, linear, softplus, sigmoid], dim=1)
        out = self.fc(combined)
        return out

# x = np.linspace(0, 1.5, 150)
# x = np.sin(4.25*np.pi*x)+np.sin(8.5*np.pi*x)+5*x
x = np.linspace(0, 1, 100)[:, np.newaxis]
X = torch.from_numpy(x).float()
y = np.sin(np.linspace(0, 20, 100))[:, np.newaxis]
Y = torch.from_numpy(y).float()

nd = ND(x.shape[0])
print(nd)

# Loss and Optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(nd.parameters())  

# Train the Model
for epoch in range(2000):
    # Forward + Backward + Optimize
    optimizer.zero_grad()  # zero the gradient buffer
    outputs = nd(X)
    loss = criterion(outputs, Y)

    # Add L1 penalty on the output weights (mirrors the Keras regularizer)
    loss += 0.01*torch.sum(torch.abs(dict(nd.named_parameters())['fc.weight']))

    loss.backward()
    optimizer.step()

    if epoch % 100 == 0:
        print('Epoch {0}, Loss: {1:.4f}'.format(epoch, loss.item()))
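
To mirror the Keras experiment, here is a small extrapolation sketch (my own addition; the 0-to-2 range and the plotting choices are assumptions, not from the original post):

import matplotlib.pyplot as plt

# Extrapolate to t in [0, 2]; the model was only trained on t in [0, 1]
with torch.no_grad():
    t_new = torch.linspace(0, 2, 200).unsqueeze(1)
    pred = nd(t_new).numpy().flatten()

plt.plot(np.linspace(0, 2, 200), pred, color='blue', label='prediction')
plt.plot(x.flatten(), y.flatten(), color='red', label='training data')
plt.legend()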