Build a multimodal LSTM
I have the following LSTM network. I want to add the red lines from this figure to the model. Here is my model:
import numpy as np
from keras.models import Model
from keras.layers import Dense, LSTM, Input, concatenate

# dummy data: three input streams and one 18-dimensional target
X1 = np.random.normal(size=(100, 1, 2))
X2 = np.random.normal(size=(100, 1, 2))
X3 = np.random.normal(size=(100, 1, 2))
Y = np.random.normal(size=(100, 18))

input_1 = Input(shape=(X1.shape[1], X1.shape[2]), name='input_1')
input_2 = Input(shape=(X2.shape[1], X2.shape[2]), name='input_2')
input_3 = Input(shape=(X3.shape[1], X3.shape[2]), name='input_3')

# one LSTM per input stream
lstm1 = LSTM(200, name='lstm1')(input_1)
lstm2 = LSTM(200, name='lstm2')(input_2)
lstm3 = LSTM(200, name='lstm3')(input_3)

# per-stream heads (not used by the final model)
output1 = Dense(18, activation='linear', name='out1')(lstm1)
output2 = Dense(18, activation='linear', name='out2')(lstm2)
output3 = Dense(18, activation='linear', name='out3')(lstm3)

# fuse the three streams into a single output
concat = concatenate([lstm1, lstm2, lstm3])
output = Dense(18, activation='linear', name='out')(concat)  # renamed: 'out1' is already taken above

model = Model(inputs=[input_1, input_2, input_3], outputs=output)
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['MAE'])
model.fit([X1, X2, X3], Y, epochs=1, batch_size=100)
Can anyone help me build this model? Thanks.
Try using return_state=True on the LSTM layers. It lets you retrieve the last hidden state h and cell state c computed by an LSTM, which you can then pass as the initial_state of the next LSTM:
# return_state=True makes each LSTM also return its final states h and c
lstm1, h1, c1 = LSTM(200, name='lstm1', return_state=True)(input_1)
lstm2, h2, c2 = LSTM(200, name='lstm2', return_state=True)(input_2, initial_state=[h1, c1])
lstm3 = LSTM(200, name='lstm3')(input_3, initial_state=[h2, c2])
Here you go (it does not display well...):
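For completeness, here is a minimal end-to-end sketch that wires this state hand-off into the model from the question. The shapes, layer sizes, and training call are taken from the original code; the assumption that the red lines in the figure mean passing h and c from lstm1 to lstm2 to lstm3 follows the snippet above:

import numpy as np
from keras.models import Model
from keras.layers import Dense, LSTM, Input, concatenate

# dummy data with the same shapes as in the question
X1 = np.random.normal(size=(100, 1, 2))
X2 = np.random.normal(size=(100, 1, 2))
X3 = np.random.normal(size=(100, 1, 2))
Y = np.random.normal(size=(100, 18))

input_1 = Input(shape=(X1.shape[1], X1.shape[2]), name='input_1')
input_2 = Input(shape=(X2.shape[1], X2.shape[2]), name='input_2')
input_3 = Input(shape=(X3.shape[1], X3.shape[2]), name='input_3')

# chain the LSTMs: each layer's final h and c seed the next layer
# (this is the assumed meaning of the red lines in the figure)
lstm1, h1, c1 = LSTM(200, name='lstm1', return_state=True)(input_1)
lstm2, h2, c2 = LSTM(200, name='lstm2', return_state=True)(input_2, initial_state=[h1, c1])
lstm3 = LSTM(200, name='lstm3')(input_3, initial_state=[h2, c2])

# fuse the three streams and map to the 18-dimensional target
concat = concatenate([lstm1, lstm2, lstm3])
output = Dense(18, activation='linear', name='out')(concat)

model = Model(inputs=[input_1, input_2, input_3], outputs=output)
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['MAE'])
model.fit([X1, X2, X3], Y, epochs=1, batch_size=100)

Note that each state tensor has shape (batch_size, 200), so the hand-off only works because all three LSTMs use the same number of units; with different unit counts you would need to project h and c (e.g. through a Dense layer) first.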