ValueError while trying to run the Sequential Model from Keras
I am trying to build a NARX NN with Keras. I am still not 100% sure about the use of the parameter return_sequences=True in the LSTM neurons, but before I look into that I need to get the code to work. When I try to run it I get the following message:
ValueError: Error when checking input: expected lstm_84_input to have 3 dimensions, but got array with shape (6686, 3)
See the code below. The error appears when the model.fit command is executed. My data has the shape 40101 time steps x 6 features (3 exogenous inputs, 3 system responses).
import numpy as np
import pandas as pd
from sklearn.model_selection import TimeSeriesSplit
import tensorflow as tf
from tensorflow.keras import initializers

# --- main
data = pd.read_excel('example.xlsx', usecols=['wave','wind','current','X','Y','RZ'])
data.plot(subplots=True, figsize=[15,10])

x_data = np.array(data.loc[:,['wave','wind','current']])
y_data = np.array(data.loc[:,['X','Y','RZ']])

timeSeriesCrossValidation = TimeSeriesSplit(n_splits=5)

for train, validation in timeSeriesCrossValidation.split(x_data, y_data):
    # create model
    model = tf.keras.models.Sequential()

    # input layer
    model.add(tf.keras.layers.LSTM(units=50,
                                   input_shape=(40101, 3),
                                   dropout=0.01,
                                   recurrent_dropout=0.2,
                                   kernel_initializer=initializers.RandomNormal(mean=0, stddev=.5),
                                   bias_initializer=initializers.Zeros(),
                                   return_sequences=True))

    # 1st hidden layer
    model.add(tf.keras.layers.LSTM(units=50,
                                   dropout=0.01,
                                   recurrent_dropout=0.2,
                                   kernel_initializer=initializers.RandomNormal(mean=0, stddev=.5),
                                   bias_initializer=initializers.Zeros(),
                                   return_sequences=True))

    # 2nd hidden layer
    model.add(tf.keras.layers.LSTM(units=50,
                                   dropout=0.01,
                                   recurrent_dropout=0.2,
                                   kernel_initializer=initializers.RandomNormal(mean=0, stddev=.5),
                                   bias_initializer=initializers.Zeros(),
                                   return_sequences=False))

    # output layer
    model.add(tf.keras.layers.Dense(3))

    model.compile(loss='mse', optimizer='nadam', metrics=['accuracy'])

    model.fit(x_data[train], y_data[train],
              verbose=2,
              batch_size=None,
              epochs=10,
              validation_data=(x_data[validation], y_data[validation])
              #callbacks=early_stop
              )

    prediction = model.predict(x_data[validation])
    y_validation = y_data[validation]
The LSTM layer expects an input with 3 dimensions:
(n_samples, time_steps, features)
The data you are passing has the form:
(n_samples, features)
Since you do not have a function that creates time steps, the simplest solution is to change your input to the shape:
(40101, 1, 3)
Fake data:
x_data = np.random.rand(40101, 1, 3)
y_data = np.random.rand(40101, 3)
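If you would rather keep the real data read from the Excel file instead of fake data, you can get the same 3-D layout by inserting a length-1 time axis. A minimal sketch, assuming x_data is the 2-D array built in the question:

# (40101, 3) -> (40101, 1, 3): one time step per sample, three features
x_data = np.expand_dims(x_data, axis=1)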
Also, you should not pass the number of samples in the input_shape argument of a Keras layer. Just use this:
input_shape=(1, 3)
So here is the corrected code (with fake data):
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
import tensorflow as tf
from tensorflow.keras import initializers
from tensorflow.keras.layers import *

x_data = np.random.rand(40101, 1, 3)
y_data = np.random.rand(40101, 3)

timeSeriesCrossValidation = TimeSeriesSplit(n_splits=5)

for train, validation in timeSeriesCrossValidation.split(x_data, y_data):
    # create model
    model = tf.keras.models.Sequential()

    # input layer
    model.add(LSTM(units=5,
                   input_shape=(1, 3),
                   dropout=0.01,
                   recurrent_dropout=0.2,
                   kernel_initializer=initializers.RandomNormal(mean=0, stddev=.5),
                   bias_initializer=initializers.Zeros(),
                   return_sequences=True))

    # 1st hidden layer
    model.add(LSTM(units=5,
                   dropout=0.01,
                   recurrent_dropout=0.2,
                   kernel_initializer=initializers.RandomNormal(mean=0, stddev=.5),
                   bias_initializer=initializers.Zeros(),
                   return_sequences=True))

    # 2nd hidden layer
    model.add(LSTM(units=50,
                   dropout=0.01,
                   recurrent_dropout=0.2,
                   kernel_initializer=initializers.RandomNormal(mean=0, stddev=.5),
                   bias_initializer=initializers.Zeros(),
                   return_sequences=False))

    # output layer
    model.add(tf.keras.layers.Dense(3))

    model.compile(loss='mse', optimizer='nadam', metrics=['accuracy'])

    model.fit(x_data[train], y_data[train],
              verbose=2,
              batch_size=None,
              epochs=1,
              validation_data=(x_data[validation], y_data[validation])
              # callbacks=early_stop
              )

    prediction = model.predict(x_data[validation])
    y_validation = y_data[validation]
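If you want a quick number per fold, you could, for example, score the predictions against y_validation with scikit-learn's mean_squared_error. A minimal sketch; it would go inside the cross-validation loop, right after model.predict:

from sklearn.metrics import mean_squared_error

# compare predictions with the held-out targets of this fold
fold_mse = mean_squared_error(y_validation, prediction)  # averaged over the 3 outputs
print('validation MSE for this fold:', fold_mse)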
If you want a function that creates the time steps, use this one:
def multivariate_data(dataset, target, start_index, end_index, history_size,
                      target_size, step, single_step=False):
    data = []
    labels = []
    # the first window needs history_size past points, so shift the start
    start_index = start_index + history_size
    if end_index is None:
        end_index = len(dataset) - target_size
    for i in range(start_index, end_index):
        # indices of the look-back window for sample i
        indices = range(i - history_size, i, step)
        data.append(dataset[indices])
        if single_step:
            labels.append(target[i + target_size])
        else:
            labels.append(target[i:i + target_size])
    return np.array(data), np.array(labels)
It will give you the right shape, e.g.:
multivariate_data(dataset=np.random.rand(40101, 3),
                  target=np.random.rand(40101, 3),
                  start_index=0, end_index=len(x_data),
                  history_size=5, target_size=0, step=1,
                  single_step=True)[0].shape
(40096, 5, 3)
You lose 5 data points because, at the beginning, you cannot look back 5 steps into the past.
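If you go that route, the windowed arrays replace the fake data, and the first LSTM layer takes input_shape=(history_size, 3). A minimal sketch, where x_raw and y_raw are hypothetical names for the 2-D arrays read from the Excel file:

# x_raw: (40101, 3) exogenous inputs, y_raw: (40101, 3) responses (hypothetical names)
x_windows, y_windows = multivariate_data(dataset=x_raw,
                                         target=y_raw,
                                         start_index=0,
                                         end_index=None,
                                         history_size=5,
                                         target_size=0,
                                         step=1,
                                         single_step=True)
# x_windows.shape == (40096, 5, 3), y_windows.shape == (40096, 3)
# so the first layer of the model would become, for example:
# model.add(LSTM(units=50, input_shape=(5, 3), return_sequences=True, ...))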