How to join an encoder and a decoder

I built the following encoder-decoder architecture, and the encoder and decoder both work fine on their own:

from tensorflow.keras.layers import LSTM, Input, Reshape, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K

WORD_TO_INDEX = {"foo": 0, "bar": 1}

MAX_QUERY_WORD_COUNT = 10
QUERY_ENCODING_SIZE = 15

# ENCODER
query_encoder_input = Input(shape=(None, len(WORD_TO_INDEX)), name="query_encoder_input")
query_encoder_output = LSTM(QUERY_ENCODING_SIZE, name="query_encoder_lstm")(query_encoder_input)
query_encoder = Model(inputs=query_encoder_input, outputs=query_encoder_output)

# DECODER
query_decoder_input = Input(shape=(QUERY_ENCODING_SIZE,), name="query_decoder_input")
query_decoder_reshape = Reshape((1, QUERY_ENCODING_SIZE), name="query_decoder_reshape")(query_decoder_input)
query_decoder_lstm = LSTM(QUERY_ENCODING_SIZE, name="query_decoder_lstm", return_sequences=True, return_state=True)
recurrent_input, state_h, state_c = query_decoder_lstm(query_decoder_reshape)
states = [state_h, state_c]
query_decoder_outputs = []
for _ in range(MAX_QUERY_WORD_COUNT):
    recurrent_input, state_h, state_c = query_decoder_lstm(recurrent_input, initial_state=states)
    query_decoder_outputs.append(recurrent_input)
    states = [state_h, state_c]
query_decoder_output = Lambda(lambda x: K.concatenate(x, axis=1), name="query_decoder_concat")(query_decoder_outputs)
query_decoder = Model(inputs=query_decoder_input, outputs=query_decoder_output)
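
To confirm that the two halves really do work in isolation, here is a minimal smoke test (the batch size, timestep count, and dummy_queries array are hypothetical, just to illustrate the expected shapes):

import numpy as np

# a hypothetical batch: 4 queries, 6 timesteps each, one-hot over the vocabulary
dummy_queries = np.zeros((4, 6, len(WORD_TO_INDEX)), dtype="float32")
encoded = query_encoder.predict(dummy_queries)  # shape (4, QUERY_ENCODING_SIZE)
decoded = query_decoder.predict(encoded)        # shape (4, MAX_QUERY_WORD_COUNT, QUERY_ENCODING_SIZE)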

But when I try to join them together to create an autoencoder, I get a strange error and I can't figure out why.

# AUTOENCODER
# apply the reshape layer to the output of the encoder
query_autoencoder_output = query_decoder.layers[1](query_encoder_output)
# rebuild the autoencoder by applying each layer of the decoder to the output of the encoder
for decoder_layer in query_decoder.layers[2:]:
    # this fails and I don't know why
    query_autoencoder_output = decoder_layer(query_autoencoder_output)
# the code never gets here
query_autoencoder = Model(inputs=query_encoder_input, outputs=query_autoencoder_output)

This raises the error:

ValueError: Shape must be rank 3 but is rank 2 for '{{node query_decoder_concat/concat_1}} = ConcatV2[N=3, T=DT_FLOAT, Tidx=DT_INT32](query_decoder_lstm/PartitionedCall_11:1, query_decoder_lstm/PartitionedCall_11:2, query_decoder_lstm/PartitionedCall_11:3, query_decoder_concat/concat_1/axis)' with input shapes: [?,1,15], [?,15], [?,15], [].

This is the template I used for my decoder. (See the "What if I don't want to use teacher forcing for training?" section.)

I relied on these answers (especially the last one) to figure out how to combine models together.

What does this error mean, and how can I fix it?

You can *essentially* treat a model as a layer, and that is also why the layer-by-layer approach above fails: iterating over query_decoder.layers replays each layer exactly once and discards the decoder's internal call graph. Because the LSTM was built with return_sequences=True and return_state=True, a single call to it returns three tensors (the rank-3 sequence output plus the two rank-2 states), and that whole list is fed straight into the Lambda's concatenate, which expects only rank-3 sequence outputs; hence "Shape must be rank 3 but is rank 2". Calling the whole decoder model instead reruns its entire graph, loop and all. With an autoencoder, it can be as simple as:

autoencoder = Sequential([encoder, decoder])
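
For instance, a minimal sketch of training it (assuming encoder and decoder are already-built Keras models with compatible shapes; x_train is a hypothetical training array, and since an autoencoder reconstructs its inputs, the inputs double as the targets):

from tensorflow.keras.models import Sequential

autoencoder = Sequential([encoder, decoder])
autoencoder.compile(optimizer="adam", loss="mse")
autoencoder.fit(x_train, x_train, epochs=10)  # inputs are also the targets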

If you want some extra flexibility, you can subclass tf.keras.Model:

class AutoEncoder(tf.keras.Model):
    def __init__(self, encoder, decoder):
        super(AutoEncoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def call(self, inputs, training=None, **kwargs):
        x = self.encoder(inputs)
        x = self.decoder(x)
        return x

ae = AutoEncoder(encoder, decoder)

ae.fit(...
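
A subclassed model still has to be compiled before calling fit; a minimal sketch, assuming x_train is a hypothetical training array:

ae.compile(optimizer="adam", loss="binary_crossentropy")
ae.fit(x_train, x_train, epochs=10, batch_size=32)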

Full reproducible example:

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import keras
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
if physical_devices:  # guard so the example also runs on CPU-only machines
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
import numpy as np

(xtrain, ytrain), (xtest, ytest) = keras.datasets.cifar10.load_data()

train_ix = np.where(ytrain.ravel() == 1)
test_ix = np.where(ytest.ravel() == 1)

cars_train = xtrain[train_ix]
cars_test = xtest[test_ix]

cars = np.vstack([cars_train, cars_test]).astype(np.float32)/255

X = tf.data.Dataset.from_tensor_slices(cars).batch(8)


class Encoder(keras.Model):
    def __init__(self):
        super(Encoder, self).__init__()
        self.flat = keras.layers.Flatten(input_shape=(32, 32, 3))
        self.dense1 = keras.layers.Dense(128)
        self.dense2 = keras.layers.Dense(32)

    def call(self, inputs, training=None, **kwargs):
        x = self.flat(inputs)
        x = keras.activations.selu(self.dense1(x))
        x = keras.activations.selu(self.dense2(x))
        return x


class Decoder(keras.Model):
    def __init__(self):
        super(Decoder, self).__init__()
        self.dense1 = keras.layers.Dense(128, input_shape=[32])
        self.dense2 = keras.layers.Dense(32 * 32 * 3)
        self.reshape = keras.layers.Reshape([32, 32, 3])

    def call(self, inputs, training=None, **kwargs):
        x = keras.activations.selu(self.dense1(inputs))
        x = keras.activations.sigmoid(self.dense2(x))
        x = self.reshape(x)
        return x


class AutoEncoder(keras.Model):
    def __init__(self, encoder, decoder):
        super(AutoEncoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def call(self, inputs, training=None, **kwargs):
        x = self.encoder(inputs)
        x = self.decoder(x)
        return x


ae = AutoEncoder(Encoder(), Decoder())

loss_object = keras.losses.BinaryCrossentropy()

reconstruction_loss = keras.metrics.Mean(name='reconstruction_loss')

optimizer = keras.optimizers.Adam()


@tf.function
def reconstruct(inputs):
    with tf.GradientTape() as tape:
        out = ae(inputs)
        loss = loss_object(inputs, out)

    gradients = tape.gradient(loss, ae.trainable_variables)
    optimizer.apply_gradients(zip(gradients, ae.trainable_variables))

    reconstruction_loss(loss)


if __name__ == '__main__':
    template = 'Epoch {:2} Reconstruction Loss {:.4f}'
    for epoch in range(50):
        reconstruction_loss.reset_states()
        for input_batches in X:
            reconstruct(input_batches)
        print(template.format(epoch + 1, reconstruction_loss.result()))

Output:

Epoch 35 Reconstruction Loss 0.5794
Epoch 36 Reconstruction Loss 0.5793
Epoch 37 Reconstruction Loss 0.5792
Epoch 38 Reconstruction Loss 0.5791
Epoch 39 Reconstruction Loss 0.5790
Epoch 40 Reconstruction Loss 0.5789

Building on M Z's answer, but without using Sequential, you can do this:

query_autoencoder = Model(inputs=query_encoder_input, outputs=query_decoder(query_encoder_output))
query_autoencoder.summary()
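
As a quick sanity check, calling the combined model on a hypothetical batch (4 queries, 6 timesteps, one-hot over the vocabulary) yields the decoder's full output shape:

import numpy as np

dummy_queries = np.zeros((4, 6, len(WORD_TO_INDEX)), dtype="float32")
print(query_autoencoder(dummy_queries).shape)  # (4, MAX_QUERY_WORD_COUNT, QUERY_ENCODING_SIZE) = (4, 10, 15)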

This way the summary also breaks the model down into more layers than in M Z's answer.