tf.nn.static_rnn - input must be a sequence

I'm trying to create a 2-layer LSTM (with dropout), but I get the error message 'inputs must be a sequence'.

I use embeddings as the input, but I'm not sure how to turn them into a sequence. Any explanation is much appreciated.

Here is my graph definition:

    with tf.name_scope('Placeholders'):
        input_x = tf.placeholder(tf.int32, [None, n_steps], name='input_x')
        input_y = tf.placeholder(tf.float32, [None, n_classes], name='input_y')
        dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')


    with tf.name_scope('Embedding_layer'):
        embeddings_var = tf.Variable(tf.random_uniform([vocab_size, EMBEDDING_DIM], -1.0, 1.0), trainable=True)
        embedded_chars = tf.nn.embedding_lookup(embeddings_var, input_x)
        print(embedded_chars, 'embed')


    def get_a_cell(lstm_size, keep_prob):
        lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
        drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=dropout_keep_prob)
        return drop


    with tf.name_scope('lstm'):
        cell = tf.nn.rnn_cell.MultiRNNCell(
            [get_a_cell(num_hidden, dropout_keep_prob) for _ in range(num_layers)]
        )

    lstm_outputs, state = tf.nn.static_rnn(cell=cell,inputs=embedded_chars, dtype=tf.float32)

    with tf.name_scope('Fully_connected'):
        W = tf.Variable(tf.truncated_normal([num_hidden, n_classes], stddev=0.1))
        b = tf.Variable(tf.constant(0.1, shape=n_classes))
        output = tf.nn.xw_plus_b(lstm_outputs,W,b)
        predictions = tf.argmax(output, 1, name='predictions')

    with tf.name_scope('Loss'):
        # Cross-entropy loss and optimizer initialization
        loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=input_y))
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss1, global_step=global_step)

    with tf.name_scope('Accuracy'):
        # Accuracy metrics
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.nn.softmax(output)), input_y), tf.float32))

    with tf.name_scope('num_correct'):
        correct_predictions = tf.equal(predictions, tf.argmax(input_y, 1))
        num_correct = tf.reduce_sum(tf.cast(correct_predictions, 'float'), name='num_correct')

EDIT: When I change static_rnn to dynamic_rnn, the error message changes to the following, failing on the bias (b) variable:

TypeError: 'int' object is not iterable

I changed the bias term to:

    b = tf.Variable(tf.random_normal([n_classes]))

and got a new error message:

ValueError: Shape must be rank 2 but is rank 3 for 'Fully_connected/xw_plus_b/MatMul' (op: 'MatMul') with input shapes: [?,27,128], [128,6].

Let's assume you use tf.dynamic_rnn. (In the tf.static_rnn case, the first error occurs because you are not providing the input in the right format: tf.static_rnn expects a sequence of tensors, i.e. a list of n_steps tensors each of shape [batch_size x dim], rather than a single tensor of shape [batch_size x seq_len x dim], whereas tf.dynamic_rnn accepts such a tensor as input.) The TypeError on the bias, by the way, came from tf.constant(0.1, shape=n_classes): the shape argument must be a list such as [n_classes], not a bare int.
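For completeness, if you wanted to keep tf.static_rnn, a minimal sketch of the conversion (reusing embedded_chars, n_steps and cell from the question; the tf.unstack call is my illustration, not part of the original code):

    # embedded_chars has shape [batch_size, n_steps, EMBEDDING_DIM];
    # tf.static_rnn wants a Python list of n_steps tensors, each of
    # shape [batch_size, EMBEDDING_DIM], so split along the time axis.
    inputs_as_list = tf.unstack(embedded_chars, num=n_steps, axis=1)
    lstm_outputs, state = tf.nn.static_rnn(cell=cell, inputs=inputs_as_list, dtype=tf.float32)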

I invite you to read the documentation of tf.nn.dynamic_rnn: for your classification problem you probably don't want to use lstm_outputs but state, which basically contains the last output of your RNN. lstm_outputs contains all the outputs, and here you are only interested in the last one (unless you want to do something like attention for classification, where you would need all the outputs).

To get the last output, you basically need to do this:

    lstm_outputs, state = tf.nn.dynamic_rnn(cell=cell, inputs=embedded_chars, dtype=tf.float32)
    last_output = state[-1].h

state[-1] takes the state of the last cell (the top layer of the MultiRNNCell), and its h field contains that layer's output at the final time step; you then pass last_output to your feed-forward network.
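Equivalently, assuming every sequence in the batch really spans all n_steps time steps (no padding), you could slice the last time step out of lstm_outputs instead:

    # lstm_outputs has shape [batch_size, n_steps, num_hidden]
    last_output = lstm_outputs[:, -1, :]  # [batch_size, num_hidden]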

Full code

(working, but it computes the wrong accuracy; see the comment in the Accuracy block)

    import tensorflow as tf

    n_classes = 6
    n_steps = 27
    num_hidden = 128
    dropout_keep_prob = 0.5  # note: shadowed by the placeholder of the same name below
    vocab_size = 10000
    EMBEDDING_DIM = 300
    num_layers = 2

    with tf.name_scope('Placeholders'):
        input_x = tf.placeholder(tf.int32, [None, n_steps], name='input_x')
        input_y = tf.placeholder(tf.float32, [None, n_classes], name='input_y')
        dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')


    with tf.name_scope('Embedding_layer'):
        embeddings_var = tf.Variable(tf.random_uniform([vocab_size, EMBEDDING_DIM], -1.0, 1.0), trainable=True)
        embedded_chars = tf.nn.embedding_lookup(embeddings_var, input_x)
        print(embedded_chars, 'embed')


    def get_a_cell(lstm_size, keep_prob):
        lstm = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
        drop = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=keep_prob)
        return drop


    with tf.name_scope('lstm'):
        cell = tf.nn.rnn_cell.MultiRNNCell(
            [get_a_cell(num_hidden, dropout_keep_prob) for _ in range(num_layers)]
        )

    lstm_outputs, state = tf.nn.dynamic_rnn(cell=cell, inputs=embedded_chars, dtype=tf.float32)
    last_output = state[-1].h

    with tf.name_scope('Fully_connected'):
        W = tf.Variable(tf.truncated_normal([num_hidden, n_classes], stddev=0.1))
        b = tf.Variable(tf.constant(0.1, shape=[n_classes]))
        output = tf.nn.xw_plus_b(last_output, W, b)
        predictions = tf.argmax(output, 1, name='predictions')

    with tf.name_scope('Loss'):
        # Cross-entropy loss and optimizer initialization
        loss1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=input_y))
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss1, global_step=global_step)

    with tf.name_scope('Accuracy'):
        # Accuracy metric
        # NOTE: rounding the softmax and comparing element-wise against the
        # one-hot labels also counts all the matching zeros, so this value is
        # misleadingly high; prefer an accuracy based on correct_predictions.
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.nn.softmax(output)), input_y), tf.float32))

    with tf.name_scope('num_correct'):
        correct_predictions = tf.equal(predictions, tf.argmax(input_y, 1))
        num_correct = tf.reduce_sum(tf.cast(correct_predictions, 'float'), name='num_correct')
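
Finally, about the wrong accuracy flagged above: a minimal sketch of an accuracy op that is consistent with the argmax-based predictions, plus an illustrative feed (the random batch below is dummy data, not from the original post):

    import numpy as np

    # Fraction of examples whose predicted class matches the label.
    accuracy_fixed = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_x = np.random.randint(0, vocab_size, size=(32, n_steps))    # dummy token ids
        batch_y = np.eye(n_classes)[np.random.randint(0, n_classes, 32)]  # dummy one-hot labels
        _, loss_val, acc_val = sess.run(
            [optimizer, loss1, accuracy_fixed],
            feed_dict={input_x: batch_x, input_y: batch_y, dropout_keep_prob: 0.5})
        print(loss_val, acc_val)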