How can we get the values of hidden layer nodes in TensorFlow/TFLearn?
This is the XOR code in tflearn. I want to get the values of the penultimate hidden-layer nodes (not the weights). How can I get them? More specifically, I want the values of the layer2 nodes (named in the code below) for each of the four predictions shown.
import tensorflow as tf
import tflearn

X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]] #input
Y_xor = [[0.], [1.], [1.], [0.]] #input_labels

# Graph definition
with tf.Graph().as_default():
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
    net = tflearn.input_data(shape=[None, 2], name='inputLayer')
    net = tflearn.fully_connected(net, 2, activation='sigmoid', weights_init=tnorm, name='layer1')
    net = tflearn.fully_connected(net, 1, activation='softmax', weights_init=tnorm, name='layer2')
    regressor = tflearn.regression(net, optimizer='sgd', learning_rate=2., loss='mean_square', name='layer3')

    # Training
    m = tflearn.DNN(regressor)
    m.fit(X, Y_xor, n_epoch=100, snapshot_epoch=False)

    # Testing
    print("Testing XOR operator")
    print("0 xor 0:", m.predict([[0., 0.]]))
    print("0 xor 1:", m.predict([[0., 1.]]))
    print("1 xor 0:", m.predict([[1., 0.]]))
    print("1 xor 1:", m.predict([[1., 1.]]))

    layer1_var = tflearn.variables.get_layer_variables_by_name('layer1')
    layer2_var = tflearn.variables.get_layer_variables_by_name('layer2')
    inputLayer_var = tflearn.variables.get_layer_variables_by_name('inputLayer')

    #result = tf.matmul(inputLayer_var, layer1_var[0]) + layer1_var[1]

    with m.session.as_default():
        print(tflearn.variables.get_value(layer1_var[0])) #layer1 weights
        print(tflearn.variables.get_value(layer1_var[1])) #layer1 bias
        print(tflearn.variables.get_value(layer2_var[0])) #layer2 weights
        print(tflearn.variables.get_value(layer2_var[1])) #layer2 bias
You can create a new model that shares the same session (so it reuses the same trained weights). Note that you could also save the 'm' model and load it as 'm2'; that gives a similar result.
import tensorflow as tf
import tflearn

X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
Y_xor = [[0.], [1.], [1.], [0.]]

# Graph definition
with tf.Graph().as_default():
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
    net = tflearn.input_data(shape=[None, 2], name='inputLayer')
    layer1 = tflearn.fully_connected(net, 2, activation='sigmoid', weights_init=tnorm, name='layer1')
    layer2 = tflearn.fully_connected(layer1, 1, activation='softmax', weights_init=tnorm, name='layer2')
    regressor = tflearn.regression(layer2, optimizer='sgd', learning_rate=2., loss='mean_square', name='layer3')

    # Training
    m = tflearn.DNN(regressor)
    m.fit(X, Y_xor, n_epoch=100, snapshot_epoch=False)

    # Testing
    print("Testing XOR operator")
    print("0 xor 0:", m.predict([[0., 0.]]))
    print("0 xor 1:", m.predict([[0., 1.]]))
    print("1 xor 0:", m.predict([[1., 0.]]))
    print("1 xor 1:", m.predict([[1., 1.]]))

    # You can create a new model that shares the same session (to get the same weights).
    # Or you can simply save and load the model.
    m2 = tflearn.DNN(layer1, session=m.session)
    print(m2.predict([[0., 0.]]))  # layer1 node values for input [0., 0.]
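The same trick extends to the layer2 values the question asks for; here is a minimal sketch, continuing inside the same `with` block above (the model name `m3` is hypothetical):

    # Hypothetical extension: wrap layer2 in another model that shares m's
    # session, so predict() returns the layer2 node values (post-activation)
    # for each of the four XOR inputs.
    m3 = tflearn.DNN(layer2, session=m.session)
    for x in X:
        print(x, "-> layer2:", m3.predict([x]))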
This may not directly answer your question, but if you use tflearn, getting hold of each layer's weights is straightforward:
net = tflearn.fully_connected(net, 300)
self.fc2_w = net.W  # weight tensor of this fully connected layer
self.fc2_b = net.b  # bias tensor of this fully connected layer
Just remember one thing: put the weight-extraction code immediately after the layer itself, not after batch normalization or a standalone activation.
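To read the trained values out of those tensors, tflearn's `DNN` model exposes `get_weights`; here is a minimal sketch, assuming a trained `tflearn.DNN` instance named `model` (a hypothetical name) and the layer handle `net` from the snippet above:

    # Assumes `model = tflearn.DNN(...)` has already been trained, and `net`
    # is the fully_connected layer captured in the snippet above.
    fc2_w = model.get_weights(net.W)  # trained weight matrix as a numpy array
    fc2_b = model.get_weights(net.b)  # trained bias vector as a numpy array
    print(fc2_w.shape, fc2_b.shape)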