Reusing Variables of an LSTM in TensorFlow
I am trying to build an RNN using an LSTM. I built an LSTM model, followed by two DNN layers and one regression output layer. I trained on my data, and the final training loss came down to about 0.009. However, when I applied the model to the test data, the loss was about 0.5. The first-epoch training loss was also about 0.5, so I think the trained variables are not being used in the test model. The only difference between the training model and the test model is the batch size: training batch size = 100~200, test batch size = 1.
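For reference, the two configs differ only in their batch_size; roughly like this (the concrete values are only illustrative):

    class Config(object):
        num_steps = 10          # illustrative values -- only batch_size
        lstm_size = 128         # actually differs between the two configs
        num_features = 4
        num_layers = 2
        num_hiddens = 64
        train = True
        epoch = 50
        learning_rate = 0.01

    main_config = Config()
    main_config.batch_size = 100      # training batch size (100~200)

    predict_config = Config()
    predict_config.batch_size = 1     # test batch size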
I create the LSTM instances in my main function. The model itself is built in the LSTM initializer:
def __init__(self, config, train_model=None):
    self.sess = sess = tf.Session()

    self.num_steps = num_steps = config.num_steps
    self.lstm_size = lstm_size = config.lstm_size
    self.num_features = num_features = config.num_features
    self.num_layers = num_layers = config.num_layers
    self.num_hiddens = num_hiddens = config.num_hiddens
    self.batch_size = batch_size = config.batch_size
    self.train = train = config.train
    self.epoch = config.epoch
    self.learning_rate = learning_rate = config.learning_rate

    with tf.variable_scope('model') as scope:
        self.lstm_cell = lstm_cell = tf.nn.rnn_cell.LSTMCell(
            lstm_size, initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.cell = cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers)

    with tf.name_scope('placeholders'):
        self.x = tf.placeholder(tf.float32, [self.batch_size, num_steps, num_features],
                                name='input-x')
        self.y = tf.placeholder(tf.float32, [self.batch_size, num_features], name='input-y')
        self.init_state = cell.zero_state(self.batch_size, tf.float32)

    with tf.variable_scope('model'):
        self.W1 = tf.Variable(tf.truncated_normal([lstm_size * num_steps, num_hiddens], stddev=0.1), name='W1')
        self.b1 = tf.Variable(tf.truncated_normal([num_hiddens], stddev=0.1), name='b1')
        self.W2 = tf.Variable(tf.truncated_normal([num_hiddens, num_hiddens], stddev=0.1), name='W2')
        self.b2 = tf.Variable(tf.truncated_normal([num_hiddens], stddev=0.1), name='b2')
        self.W3 = tf.Variable(tf.truncated_normal([num_hiddens, num_features], stddev=0.1), name='W3')
        self.b3 = tf.Variable(tf.truncated_normal([num_features], stddev=0.1), name='b3')

    self.output, self.loss = self.inference()
    tf.initialize_all_variables().run(session=sess)
    tf.initialize_variables([self.b2]).run(session=sess)

    if train_model is None:
        self.train_step = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)
Using the __init__ above, I created the two LSTM instances below.
with tf.variable_scope("model", reuse=None):
    train_model = LSTM(main_config)
with tf.variable_scope("model", reuse=True):
    predict_model = LSTM(predict_config)
After creating the two LSTM instances, I trained train_model and then fed the test set into predict_model.
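Schematically, the driving code looks something like this (train_x/train_y and test_x/test_y stand in for my real data, and the batching loop is simplified away):

    for _ in range(main_config.epoch):
        train_model.sess.run(train_model.train_step,
                             feed_dict={train_model.x: train_x,
                                        train_model.y: train_y})

    test_loss = predict_model.sess.run(predict_model.loss,
                                       feed_dict={predict_model.x: test_x,
                                                  predict_model.y: test_y})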
Why aren't the variables being reused?
The problem is that if you want to reuse a scope, you should create your variables with tf.get_variable() instead of tf.Variable(). Take a look at this tutorial on sharing variables and you will understand it better.
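Here is a minimal, self-contained illustration of the difference (this snippet is not from your code; TF 1.x API):

    import tensorflow as tf

    # tf.get_variable respects reuse: the second call returns the same variable.
    with tf.variable_scope("demo"):
        v1 = tf.get_variable("w", shape=[1])
    with tf.variable_scope("demo", reuse=True):
        v2 = tf.get_variable("w", shape=[1])
    print(v1 is v2)           # True -- one shared variable

    # tf.Variable ignores reuse: every call silently creates a new,
    # uniquified variable (w, w_1, ...), so nothing is shared.
    w1 = tf.Variable(tf.zeros([1]), name="w")
    w2 = tf.Variable(tf.zeros([1]), name="w")
    print(w1 is w2)           # False -- two distinct variables
    print(w1.name, w2.name)   # w:0 w_1:0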
Also, you don't need a session here, because you don't have to initialize variables when you define the model; they should be initialized when you train it.
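For example, build both models first, then create a single session and initialize everything once, right before training (a sketch, not your exact code):

    with tf.variable_scope("model", reuse=None):
        train_model = LSTM(main_config)
    with tf.variable_scope("model", reuse=True):
        predict_model = LSTM(predict_config)

    # One session shared by both models; initialize once the full graph exists.
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    # ... sess.run(train_model.train_step, ...) to train,
    # then evaluate predict_model in the same session.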
The code to reuse the variables looks like this:
def __init__(self, config, train_model=None):
    self.num_steps = num_steps = config.num_steps
    self.lstm_size = lstm_size = config.lstm_size
    self.num_features = num_features = config.num_features
    self.num_layers = num_layers = config.num_layers
    self.num_hiddens = num_hiddens = config.num_hiddens
    self.batch_size = batch_size = config.batch_size
    self.train = train = config.train
    self.epoch = config.epoch
    self.learning_rate = learning_rate = config.learning_rate

    with tf.variable_scope('model') as scope:
        self.lstm_cell = lstm_cell = tf.nn.rnn_cell.LSTMCell(
            lstm_size, initializer=tf.contrib.layers.xavier_initializer(uniform=False))
        self.cell = cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers)

    with tf.name_scope('placeholders'):
        self.x = tf.placeholder(tf.float32, [self.batch_size, num_steps, num_features],
                                name='input-x')
        self.y = tf.placeholder(tf.float32, [self.batch_size, num_features], name='input-y')
        self.init_state = cell.zero_state(self.batch_size, tf.float32)

    with tf.variable_scope('model'):
        # tf.get_variable participates in variable sharing under reuse=True.
        self.W1 = tf.get_variable(initializer=tf.truncated_normal([lstm_size * num_steps, num_hiddens], stddev=0.1), name='W1')
        self.b1 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens], stddev=0.1), name='b1')
        self.W2 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens, num_hiddens], stddev=0.1), name='W2')
        self.b2 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens], stddev=0.1), name='b2')
        self.W3 = tf.get_variable(initializer=tf.truncated_normal([num_hiddens, num_features], stddev=0.1), name='W3')
        self.b3 = tf.get_variable(initializer=tf.truncated_normal([num_features], stddev=0.1), name='b3')

    self.output, self.loss = self.inference()

    if train_model is None:
        self.train_step = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)
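Note that the tf.Session and the two initialize calls are gone from __init__; initialization now happens outside the model definition, at training time, as sketched above.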
To see which variables are created after you build train_model and predict_model, use the following code:
for v in tf.all_variables():
    print(v.name)
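If reuse is working, each of the model's weights (W1, b1, and so on) should appear exactly once in this list; a name showing up again with a _1-style suffix means a second, unshared copy was created.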