TensorFlow: calling a graph inside another graph

I need to feed the "logits" of one graph (g1) as input to another graph (g2). Then I need to get g2's layer outputs when its input is "logits". After some calculations on those layer outputs, I should return a custom loss value to g1.

Here is the first graph:

g1 = tf.Graph()
with g1.as_default():
    X = tf.placeholder(dtype=tf.float32, shape=[...])
    Y = tf.placeholder(dtype=tf.float32, shape=[...])
    ...
    logits = tf.matmul(flatten, W2) + b2

    def custom_loss(logits):
        # get layer output values of g2 on the input "logits" 
        # some calculations on layer outputs
        return loss

    mse = tf.reduce_mean(tf.squared_difference(logits, Y))

    loss = mse + custom_loss(logits)

    step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

sess1 = tf.InteractiveSession(graph=g1)
tf.global_variables_initializer().run()

Here is the second graph:

g2 = tf.Graph()
with g2.as_default():
    X = tf.placeholder(dtype=tf.float32, shape=[...])
    Y = tf.placeholder(dtype=tf.float32, shape=[...])
    ...
    loss = ...
    step = ...

sess2 = tf.InteractiveSession(graph=g2)
tf.global_variables_initializer().run()

I am not sure whether this is possible. The first problem is that the sessions of these graphs are different, so I cannot feed "logits" from graph g1 as input to g2.

The second problem is that g2 expects an array of elements ("X"), but when I feed "logits" to g2 it won't work because it is a tensor. It could be converted to a numpy array using a session, but how can I use a session while building the graph? I create the sessions after creating the graphs.

I need your advice on how to solve these problems. Thanks in advance.

Consider the following example. You have the first graph as follows:

import tensorflow as tf

graph1 = tf.Graph()
with graph1.as_default():
    x1 = tf.placeholder(tf.float32, shape=[None, 2])
    y1 = tf.placeholder(tf.int32, shape=[None])

    with tf.name_scope('network'):
        logits1 = tf.layers.dense(x1, units=2)

    train_vars1 = tf.trainable_variables()

And the second graph:

graph2 = tf.Graph()
with graph2.as_default():
    x2 = tf.placeholder(tf.float32, shape=[None, 2])
    y2 = tf.placeholder(tf.int32, shape=[None])

    with tf.name_scope('network'):
        logits2 = tf.layers.dense(x2, units=2)

    with tf.name_scope('loss'):
        xentropy2 = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y2, logits=logits2)
        loss_fn2 = tf.reduce_mean(xentropy2)

    with tf.name_scope('optimizer'):
        optimizer2 = tf.train.GradientDescentOptimizer(0.01)
        train_op2 = optimizer2.minimize(loss_fn2)
    train_vars2 = tf.trainable_variables()

Now you want to feed the logits layer output of the first graph as input to the second graph. We do this by creating two sessions, initializing the variables, evaluating the logits layer of the first graph, and then feeding the evaluated values as input to the second graph. I'll illustrate with a toy blobs dataset:

from sklearn.datasets import make_blobs

x_train, y_train = make_blobs(n_samples=4,
                              n_features=2,
                              centers=[[1, 1], [-1, -1]],
                              cluster_std=0.5)
sess1 = tf.Session(graph=graph1)
sess2 = tf.Session(graph=graph2)

_ = sess1.run([v.initializer for v in train_vars1])
_ = sess2.run([v.initializer for v in train_vars2])

# feed the logits layer of graph1 as input to graph2
logits1_val = sess1.run(logits1, feed_dict={x1: x_train})
logits2_val = sess2.run(logits2, feed_dict={x2: logits1_val})
print(logits2_val)
# [[ 1.3904244   2.811252  ]
#  [-0.39521402 -1.6812694 ]
#  [-1.7728546  -4.522432  ]
#  [ 0.6836863   3.2234416 ]]

Note that the evaluated logits of the first graph (logits1_val) are already a numpy array, so you can feed them to the second graph as-is. The same applies when you want to execute a training step for the second graph:

# train step for the second graph
logits1_val = sess1.run(logits1, feed_dict={x1: x_train})
loss_val2, _ = sess2.run([loss_fn2, train_op2], feed_dict={x2: logits1_val, y2: y_train})
print(loss_val2) # 0.8134985
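
For completeness, here is a minimal sketch of a training loop that chains the two session runs (n_epochs is an illustrative choice, not part of the original code). Keep in mind that with two separate graphs, gradients from the second graph's loss cannot flow back into the first graph's weights; only the second graph is trained here:

# Hypothetical training loop: each iteration evaluates graph1's logits
# and performs one gradient step on graph2. No gradients cross the
# session boundary, so graph1's weights stay fixed.
n_epochs = 10
for epoch in range(n_epochs):
    logits1_val = sess1.run(logits1, feed_dict={x1: x_train})
    loss_val2, _ = sess2.run([loss_fn2, train_op2],
                             feed_dict={x2: logits1_val, y2: y_train})
    print('epoch %d, graph2 loss: %f' % (epoch, loss_val2))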

Update: if we define both networks in the same graph:

import tensorflow as tf
from sklearn.datasets import make_blobs

x_train, y_train = make_blobs(n_samples=4,
                              n_features=2,
                              centers=[[1, 1], [-1, -1]],
                              cluster_std=0.5)

with tf.variable_scope('network_1'):
    x = tf.placeholder(tf.float32, shape=[None, 2])
    y = tf.placeholder(tf.int32, shape=[None])

    with tf.name_scope('network'):
        logits1 = tf.layers.dense(x, units=2)

with tf.variable_scope('network_2'):
    with tf.name_scope('network'):
        logits2 = tf.layers.dense(logits1, units=2) # <-- output of `network_1` is input to `network_2`

    with tf.name_scope('custom_loss'):
        # Define your custom loss here. I use cross-entropy
        # for illustration
        xentropy2 = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits2)
        custom_loss2 = tf.reduce_mean(xentropy2)

    with tf.name_scope('optimizer'):
        optimizer2 = tf.train.GradientDescentOptimizer(0.01)
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope='network_2')
        train_op2 = optimizer2.minimize(custom_loss2, var_list=var_list)

with tf.variable_scope('network_1'):
    # Take the `custom_loss2` from `network_2` and create a new custom loss
    # for `network_1`
    xentropy1 = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=y, logits=logits1)
    custom_loss1 = tf.reduce_mean(xentropy1) + custom_loss2 # <-- loss from `network_2`
    optimizer1 = tf.train.AdamOptimizer(0.01)
    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope='network_1')
    train_op1 = optimizer1.minimize(custom_loss1, var_list=var_list)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # grad update step + loss computation for first network
    loss1, _ = sess.run([custom_loss1, train_op1], feed_dict={x: x_train, y: y_train})
    print(loss1) # 0.44655064
    # grad update step + loss computation for second network
    loss2, _ = sess.run([custom_loss2, train_op2], feed_dict={x: x_train, y: y_train})
    print(loss2) # 0.3163877
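
If you want to verify that each train op only updates its own sub-network, you can list the trainable variables collected per scope, which is exactly what the var_list arguments above are built from (a small sketch; the exact variable names depend on TensorFlow's default naming):

# Inspect the variable partition used by the two optimizers above.
vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='network_1')
vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='network_2')
print([v.name for v in vars1])  # dense kernel/bias of `network_1`
print([v.name for v in vars2])  # dense kernel/bias of `network_2`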