TFLearn time series forecasting prediction
After defining my neural network and training my model:
net = tflearn.input_data(shape=[None, 1, 1])
tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
net = tflearn.lstm(net, timesteps, dropout=0.8)
net = tflearn.fully_connected(net, 1, activation='linear', weights_init=tnorm)
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='mean_square', metric='R2')
# Define model
model = tflearn.DNN(net, clip_gradients=0.)
model.fit(X, y, n_epoch=nb_epoch, batch_size=batch_size, shuffle=False, show_metric=True)
score = model.evaluate(X, y, batch_size=128)
model.save('ModeSpot.tflearn')
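(X, y, nb_epoch and batch_size are not shown above. X and y are just the lag and target columns of the scaled, supervised data, reshaped to the [samples, timesteps, features] layout that input_data(shape=[None, 1, 1]) expects; roughly along these lines, a simplified sketch with illustrative values, not my exact preprocessing:)

# lag column -> network input, target column -> label, for input_data(shape=[None, 1, 1])
X = train_scaled[:, 0].reshape(-1, 1, 1)   # value at t-1 -> [samples, 1 timestep, 1 feature]
y = train_scaled[:, 1].reshape(-1, 1)      # value at t
nb_epoch, batch_size = 10, 32              # hypothetical training settings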
I am now running into a problem: most of the tutorials I have found on time series forecasting use the test set to make predictions (they feed the test set to .predict()). The problem is that in reality we would not know those values, since they are exactly what we want to predict.
At the moment I am using this:
def forecast_lstm(model, X):
    X = X.reshape(len(X), 1, 1)
    yhat = model.predict(X)
    return yhat[0, 0]

# split data into train and test-sets
train, test = supervised_values[0:-10000], supervised_values[-10000:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)

# Build neural network
net = tflearn.input_data(shape=[None, 1, 1])
tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
net = tflearn.lstm(net, 1, dropout=0.3)
net = tflearn.fully_connected(net, 1, activation='linear', weights_init=tnorm)
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='mean_square', metric='R2')
lstm_model = tflearn.DNN(net, clip_gradients=0.)
lstm_model.load('ModeSpot.tflearn')

# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped)

# walk-forward validation on the test data
predictions = list()
error_scores = list()
for i in range(len(test_scaled)):
    # make one-step forecast
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = forecast_lstm(lstm_model, X)
    # invert scaling
    yhat2 = invert_scale(scaler, X, yhat)
    # invert differencing
    yhat3 = inverse_difference(raw_values, yhat2, len(test_scaled) + 1 - i)
    # store forecast
    predictions.append(yhat3)
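(For reference, the helpers used above and further below, scale, invert_scale, difference, inverse_difference and timeseries_to_supervised, follow the usual difference-then-MinMax-scale pattern for this kind of tutorial; here is a rough sketch of what they look like, simplified and not necessarily my exact code:)

from pandas import Series, DataFrame, concat
from sklearn.preprocessing import MinMaxScaler
import numpy as np

def difference(dataset, interval=1):
    # difference the series so it becomes (more) stationary
    return Series([dataset[i] - dataset[i - interval] for i in range(interval, len(dataset))])

def inverse_difference(history, yhat, interval=1):
    # add back the value that was removed by differencing
    return yhat + history[-interval]

def timeseries_to_supervised(data, lag=1):
    # column 0 = value at t-1 (input), column 1 = value at t (target)
    df = DataFrame(data)
    cols = [df.shift(i) for i in range(lag, 0, -1)] + [df]
    return concat(cols, axis=1).fillna(0)

def scale(train, test=None):
    # fit the [-1, 1] scaler on the training data only, then transform
    scaler = MinMaxScaler(feature_range=(-1, 1)).fit(train)
    train_scaled = scaler.transform(train)
    if test is None:
        return scaler, train_scaled
    return scaler, train_scaled, scaler.transform(test)

def invert_scale(scaler, X, value):
    # rebuild a full row [X..., value] so the scaler can invert it
    row = np.append(np.ravel(X), [value]).reshape(1, -1)
    return scaler.inverse_transform(row)[0, -1]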
But this only works on my test set. What should I do to forecast the next x values?
I think I have seen somewhere that to predict the value at T, I have to predict using the value at T-1 (then T is used for T+1, and so on, until I reach the number of forecasts I want). Is that a good approach?
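(In other words, a loop where each prediction is fed back in as the next input. Just a sketch of the idea with made-up names and a hypothetical horizon; my actual attempt follows below.)

import numpy as np

n_forecasts = 10                                 # hypothetical horizon
inputs = [train_scaled[-1, 0]]                   # start from the last known (scaled) observation
forecasts = []
for t in range(n_forecasts):
    x = np.array(inputs[t]).reshape(1, 1, 1)     # [1 sample, 1 timestep, 1 feature]
    yhat = lstm_model.predict(x)[0][0]           # one-step-ahead prediction
    inputs.append(yhat)                          # feed the prediction back in as the next input
    forecasts.append(yhat)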
I tried doing it like this:
def forecast_lstm2(model, X):
    X = X.reshape(-1, 1, 1)
    yhat = model.predict(X)
    return yhat[0, 0]

test = list()
X, y = train_scaled[0, 0:-1], train_scaled[0, -1]
test.append(X)
for i in range(len(test_scaled)):
    # make one-step forecast
    yhat = forecast_lstm2(lstm_model, test[i])
    test.append(yhat)
    # invert scaling
    yhat2 = invert_scale(scaler, test[i+1], yhat)
    # invert differencing
    yhat3 = inverse_difference(raw_values, yhat2, len(test) + 1 - i)
    # store forecast
    predictions.append(yhat3)
But it did not give the expected result (after a few forecasts it always returned the same value).
Thank you for your attention and your time.
In the end, this is what seems to have worked:
# make a one-step forecast
def forecast_lstm2(model, X):
    X = X.reshape(-1, 1, 1)
    yhat = model.predict(X)
    return yhat[0, 0]
def prediction(spotId):
    epoch = [5, 15, 25, 35, 45, 50, 100]
    for e in epoch:
        tf.reset_default_graph()
        # Load CSV file, indicate that the first column represents labels
        data = read_csv('nowcastScaled'+str(spotId)+'.csv', header=0, parse_dates=[0],
                        index_col=0, squeeze=True, date_parser=parser)
        # transform data to be stationary
        raw_values = data.values
        diff_values = difference(raw_values, 1)
        # transform data to be supervised learning
        supervised = timeseries_to_supervised(diff_values, 1)
        supervised_values = supervised.values
        # split data into train and test-sets (I removed the testing data from the excel file)
        train = supervised_values[x:]
        # transform the scale of the data (and removed anything related to the testing set)
        scaler, train_scaled = scale(train)
        # Build neural network
        net = tflearn.input_data(shape=[None, 1, 1])
        tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
        net = tflearn.lstm(net, 1, dropout=0.8)
        net = tflearn.fully_connected(net, 1, activation='linear', weights_init=tnorm)
        net = tflearn.regression(net, optimizer='adam', learning_rate=0.0001,
                                 loss='mean_square', metric='R2')
        lstm_model = tflearn.DNN(net, clip_gradients=0.)
        lstm_model.load('ModeSpot'+str(spotId)+'Epoch'+str(e)+'.tflearn')
        # forecast the entire training dataset to build up state for forecasting
        train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
        lstm_model.predict(train_reshaped)
        # walk forward into the future: feed each prediction back in as the next input
        predictions = list()
        predictionFeeder = list()
        X, y = train_scaled[0, 0:-1], train_scaled[0, -1]
        predictionFeeder.append(X)
        for i in range(0, 10000):
            # make one-step forecast
            yhat = forecast_lstm2(lstm_model, predictionFeeder[i])
            predictionFeeder.append(yhat)
            # invert scaling
            yhat2 = invert_scale(scaler, predictionFeeder[i + 1], yhat)
            # invert differencing
            yhat3 = inverse_difference(raw_values, yhat2, 10000 + 1 - i)
            predictions.append(yhat3)
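(The snippet above assumes the usual imports and a date parser for the CSV's timestamp column, which are not shown. Roughly, with the timestamp format being an assumption on my part:)

import tensorflow as tf
import tflearn
from pandas import read_csv
from datetime import datetime

def parser(ts):
    # assumed timestamp format of the CSV's first column
    return datetime.strptime(ts, '%Y-%m-%d %H:%M:%S')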