Feeding Classifier data from LSTM Autoencoder

Goal:

I have built an LSTM autoencoder to reduce the feature dimensionality of my data. My plan is to encode some inputs and feed them to a classifier in the future. The encoder takes data of shape [batch_size, timesteps, features_of_timesteps]; however, in the output layer of the encoder portion I return only the last hidden state, in the shape [1, timesteps, features_of_timesteps].

class Encoder(nn.Module):
    def __init__(self, input_size, first_layer, second_layer, n_layers):
        super(Encoder, self).__init__()
        self.n_layers = n_layers
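        # getSequence() and getLast() are custom helper modules (defined elsewhere,
        # not shown here); presumably they unpack nn.LSTM's (output, (h_n, c_n)) tuple
        # into the full output sequence and the last hidden state, respectively.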
        self.encode = nn.Sequential(nn.LSTM(input_size, first_layer, batch_first=True),
                                    getSequence(),
                                    nn.ReLU(True),
                                    nn.LSTM(first_layer, second_layer),
                                    getLast())
        self.decode = nn.Sequential(nn.LSTM(second_layer, first_layer, batch_first=True),
                                    getSequence(),
                                    nn.ReLU(True),
                                    nn.LSTM(first_layer, input_size),
                                    getSequence())

    def forward(self, x):
        x = x.float()
        x = self.encode(x)
        x = x.repeat(batch_size, 1, 1)
        x = self.decode(x)
        return x

Concern:

I am afraid that the last hidden state of the second LSTM layer in the encoding portion of the model is summarizing the entire batch in addition to reducing the feature dimensionality. This feels wrong, because I am trying to reduce a single time series into a smaller vector, not a whole batch of time series into one vector. Is my concern justified?
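
For reference, the shape convention at the heart of this concern can be checked in isolation: PyTorch's nn.LSTM returns its hidden state with shape (num_layers * num_directions, batch_size, hidden_size), i.e. one vector per sample in the batch rather than one vector for the whole batch. A standalone sketch, independent of the model above:

import torch
from torch import nn

lstm = nn.LSTM(input_size=8, hidden_size=4, num_layers=1, batch_first=True)
x = torch.randn(16, 10, 8)        # (batch_size, timesteps, features_of_timesteps)
out, (h_n, c_n) = lstm(x)
print(out.shape)                  # torch.Size([16, 10, 4]) - full output sequence
print(h_n.shape)                  # torch.Size([1, 16, 4])  - one hidden vector per sample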

There are several issues in your code. To keep things simple, here is a well-defined model instead: the code below builds an LSTM autoencoder that reconstructs inputs of shape (batch_size, timesteps, number_of_features_at_each_timestep):

import torch
from torch import nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class Encoder(nn.Module):
  def __init__(self, seq_len, n_features, embedding_dim=64):
    super(Encoder, self).__init__()

    self.seq_len, self.n_features = seq_len, n_features
    self.embedding_dim, self.hidden_dim = embedding_dim, 2 * embedding_dim

    self.rnn1 = nn.LSTM(
      input_size=n_features,
      hidden_size=self.hidden_dim,
      num_layers=1,
      batch_first=True
    )
    self.rnn2 = nn.LSTM(
      input_size=self.hidden_dim,
      hidden_size=self.embedding_dim,
      num_layers=1,
      batch_first=True
    )

  def forward(self, x):
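    # rnn1: (batch, seq_len, n_features) -> (batch, seq_len, hidden_dim)
    # rnn2: (batch, seq_len, hidden_dim) -> hidden_n, the last hidden state,
    #       of shape (1, batch, embedding_dim)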
    x, (_, _) = self.rnn1(x)
    x, (hidden_n, _) = self.rnn2(x)
    return hidden_n

class Decoder(nn.Module):
  def __init__(self, seq_len, input_dim=64, n_features=1):
    super(Decoder, self).__init__()

    self.seq_len, self.input_dim = seq_len, input_dim
    self.hidden_dim, self.n_features = 2 * input_dim, n_features

    self.rnn1 = nn.LSTM(
      input_size=input_dim,
      hidden_size=input_dim,
      num_layers=1,
      batch_first=True
    )
    self.rnn2 = nn.LSTM(
      input_size=input_dim,
      hidden_size=self.hidden_dim,
      num_layers=1,
      batch_first=True
    )
    self.output_layer = nn.Linear(self.hidden_dim, n_features)

  def forward(self, x):
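    # x arrives as (1, batch, input_dim); repeat along dim 0 and permute to get
    # a (batch, seq_len, input_dim) sequence that the decoder LSTMs can unroll.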
    x = x.repeat(self.seq_len, 1, 1)
    x = x.permute(1, 0, 2)
    x, (hidden_n, cell_n) = self.rnn1(x)
    x, (hidden_n, cell_n) = self.rnn2(x)
    return self.output_layer(x)

class RecurrentAutoencoder(nn.Module):
  def __init__(self, seq_len, n_features, embedding_dim=64):
    super(RecurrentAutoencoder, self).__init__()

    self.encoder = Encoder(seq_len, n_features, embedding_dim).to(device)
    self.decoder = Decoder(seq_len, embedding_dim, n_features).to(device)

  def forward(self, x):
    print("Inputs size:", x.size())
    x = self.encoder(x)
    print("Representation size: ", x.size())
    x = self.decoder(x)
    print("Outputs size: ", x.size())
    return x

batch_n = 5
seq_len = 10
n_features = 3
inputs = torch.randn(batch_n, seq_len, n_features).to(device)

model = RecurrentAutoencoder(seq_len, n_features).to(device)
y = model(inputs)

Output:

Inputs size: torch.Size([5, 10, 3])
Representation size:  torch.Size([1, 5, 64])
Outputs size:  torch.Size([5, 10, 3])

Note that the representation (i.e. the output of the encoder) has shape (1, batch_size, embedding_dim).
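
To feed this representation to a classifier, as in the original goal, the leading dimension can simply be squeezed away so that each sequence is reduced to a single embedding vector. A minimal sketch, assuming a plain linear classification head (the classifier layer and its two output classes are illustrative assumptions, not part of the model above):

with torch.no_grad():
    representation = model.encoder(inputs)   # (1, 5, 64)
features = representation.squeeze(0)         # (5, 64): one embedding per sequence

classifier = nn.Linear(64, 2).to(device)     # hypothetical 2-class linear head
logits = classifier(features)                # (5, 2)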