Tflearn network architecture not converging
After getting a lot of help here, I'm coming back one last time with the last problem I couldn't find a solution for.
Following up on my previous question, a user pointed out that my poor time-series prediction results might be because my architecture wasn't converging.
After looking into it and trying some of the fixes I found on other questions (setting the weights, lowering the learning rate, changing the optimizer/activation), I can't seem to get better results: the accuracy always ends up at 0 (or 0.0003, which isn't good enough).
My code:
import numpy
import numpy as np
import tflearn
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from math import sqrt
import datetime
from tflearn import Accuracy, Momentum

# Preprocessing function
def preprocess(data):
    return np.array(data, dtype=np.int32)

def parser(x):
    return datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')

# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
    df = DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag + 1)]
    columns.append(df)
    df = concat(columns, axis=1)
    df.fillna(0, inplace=True)
    return df

# create a differenced series
def difference(dataset, interval=1):
    diff = list()
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return Series(diff)

# invert differenced value
def inverse_difference(history, yhat, interval=1):
    return yhat + history[-interval]

# scale train and test data to [-1, 1]
def scale(train, test):
    # fit scaler
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # transform train
    train = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train)
    # transform test
    test = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled

# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
    new_row = [x for x in X] + [value]
    array = numpy.array(new_row)
    array = array.reshape(1, len(array))
    inverted = scaler.inverse_transform(array)
    return inverted[0, -1]

# fit the network to the training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
    X, y = train[0:-1], train[:, -1]
    X = X[:, 0].reshape(len(X), 1, 1)
    y = y.reshape(len(y), 1)
    print(X.shape)
    print(y.shape)
    # Build neural network
    net = tflearn.input_data(shape=[None, 1, 1])
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
    net = tflearn.dropout(net, 0.8)
    net = tflearn.fully_connected(net, 1, activation='linear', weights_init=tnorm)
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='mean_square')
    # Define model
    model = tflearn.DNN(net, tensorboard_verbose=3, best_val_accuracy=0.6)
    model.fit(X, y, n_epoch=nb_epoch, batch_size=batch_size, shuffle=False, show_metric=True)
    score = model.evaluate(X, y, batch_size=128)
    print(score)
    return model

# make a one-step forecast
def forecast_lstm(model, X):
    X = X.reshape(len(X), 1, 1)
    yhat = model.predict(X)
    return yhat[0, 0]

# Load CSV file, indicate that the first column represents labels
data = read_csv('nowcastScaled.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# transform data to be stationary
raw_values = data.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:10000], supervised_values[10000:10100]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)

repeats = 1
for r in range(repeats):
    # fit the model
    lstm_model = fit_lstm(train_scaled, 128, 6, 1)
    # forecast the entire training dataset to build up state for forecasting
    train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
    print(lstm_model.predict(train_reshaped))
    # walk-forward validation on the test data
    predictions = list()
    error_scores = list()
    for i in range(len(test_scaled)):
        # make one-step forecast
        X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
        yhat = forecast_lstm(lstm_model, X)
        # invert scaling
        yhat = invert_scale(scaler, X, yhat)
        # invert differencing
        yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
        # store forecast
        predictions.append(yhat)
    # report error over the 100-step test window
    rmse = sqrt(mean_squared_error(raw_values[10000:10100], predictions))
    print('%d) Test RMSE: %.3f' % (1, rmse))
    error_scores.append(rmse)

print(predictions)
print(raw_values[10000:10100])
Here are the results I get from running it (raising the number of epochs doesn't seem to make it any better):
Training Step: 472 | total loss: 0.00486 | time: 0.421s
| Adam | epoch: 006 | loss: 0.00486 - binary_acc: 0.0000 -- iter: 9856/9999
Training Step: 473 | total loss: 0.00453 | time: 0.427s
| Adam | epoch: 006 | loss: 0.00453 - binary_acc: 0.0000 -- iter: 9984/9999
Training Step: 474 | total loss: 0.00423 | time: 0.430s
| Adam | epoch: 006 | loss: 0.00423 - binary_acc: 0.0000 -- iter: 9999/9999
I've tried lowering/raising most of the settings, without any success.
Here is an excerpt of the data I'm using (a univariate time series); training with more or less data didn't change anything either.
(PS: my code mostly comes from this tutorial; I had to modify it a bit since I wanted to try it with Tflearn.)
You can't define accuracy for a regression problem. You should just track the MSE between your predictions and the actual values.
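For example, here is a minimal sketch of how you could swap the default accuracy readout for a regression-friendly metric, assuming your tflearn version exposes tflearn.metrics.R2 (names and shapes below mirror your network, but this is an illustration, not your exact code):

import tflearn

net = tflearn.input_data(shape=[None, 1, 1])
net = tflearn.fully_connected(net, 1, activation='linear')
# pass an explicit regression metric instead of the default accuracy,
# so show_metric=True no longer prints a meaningless binary_acc
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='mean_square', metric=tflearn.metrics.R2())
model = tflearn.DNN(net)
# after training, judge the fit with MSE on held-out data, e.g.:
# mse = ((model.predict(X_test) - y_test) ** 2).mean()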
Your training loss already looks low, so if the predictions aren't close to the actual values, either your scaling inversion is incorrect or you are overfitting.
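To rule the inversion out quickly, you can round-trip a known row through the scaler the same way invert_scale() builds its rows and check that it comes back unchanged (a self-contained sketch with stand-in data, not your pipeline):

import numpy as np
from sklearn.preprocessing import MinMaxScaler

train = np.array([[0.1, 0.2], [0.4, 0.5], [0.9, 1.0]])  # stand-in data
scaler = MinMaxScaler(feature_range=(-1, 1)).fit(train)
# build a row the same way invert_scale() does: the inputs plus the value
X, value = train[1, 0:-1], train[1, -1]
row = np.array(list(X) + [value]).reshape(1, -1)
roundtrip = scaler.inverse_transform(scaler.transform(row))
# if this assertion fails, the inversion (not the network) is the problem
assert np.allclose(roundtrip, train[1]), "scaling inversion is broken"

If the round-trip holds, overfitting becomes the more likely explanation for predictions that don't match.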