tf.while_loop 每次迭代具有灵活的行号
tf.while_loop with flexible row numbers per iteration
我正在尝试在 tf.while_loop
中填充二维数组。问题在于我每次迭代计算出的结果返回的行数是可变的,而 TensorFlow 似乎不允许这样做。
查看这个重现问题的最小示例:
# Minimal example: fill a 2-D array inside tf.while_loop where each
# iteration produces a variable number of rows.
indices = tf.constant([2, 5, 7, 9])
num_elems = tf.shape(indices)[0]
# infer_shape=False is the fix: the default (True) requires every write
# to have the same static shape, which fails when n_rows varies.
init_array = tf.TensorArray(tf.float64, size=num_elems, infer_shape=False)
initial_i = tf.constant(0, dtype='int32')

def loop_body(i, ta):
    """Write a randomly sized (n_rows, 4) block into slot i of the TensorArray."""
    # Shape tensors are conventionally int32; int64 was part of the original failure.
    n_rows = tf.random_uniform((), minval=0, maxval=10, dtype=tf.int32)
    anchor = tf.random_normal((n_rows, 4))
    ta = ta.write(i, tf.cast(anchor, tf.float64))
    return i + 1, ta

_, anchors = tf.while_loop(lambda i, ta: i < num_elems, loop_body,
                           [initial_i, init_array])
# concat (not stack) joins elements that differ in their first dimension
# along axis 0; stack would require identical element shapes.
anchors = anchors.concat()
anchors = tf.reshape(anchors, shape=(-1, 4))
anchors = tf.identity(anchors, name="anchors")

with tf.Session() as sess:
    result = sess.run(anchors)
    print(result)
它返回:
[[ 0.07496446 -0.32444516 -0.47164568 1.10953283]
[-0.78791034 1.87736523 0.99817699 0.45336106]
[-0.65860498 -1.1703862 -0.05761402 -0.17642537]
[ 0.49713874 1.01805222 0.60902107 0.85543454]
[-1.38755643 -0.70669901 0.34549037 -0.85984546]
[-1.32419562 0.71003789 0.34984082 -1.39001906]
[ 2.26691341 -0.63561141 0.38636214 0.02521387]
[-1.55348766 1.0176425 0.4889268 -0.12093868]]
我也愿意接受替代解决方案,以在每次迭代中使用可变行数填充循环中的张量。
这是一个嵌套的 while_loop
解决方案,它写入单个 TensorArray
:
import tensorflow as tf

def _row_writer(offset, rows):
    """Build an inner-loop body that copies rows[k] into slot offset + k."""
    def body(k, array):
        array = array.write(offset + k, rows[k])
        return k + 1, array
    return body

def loop_body(i, total_size, ta):
    """Outer-loop body: append a random number of random rows to ta."""
    n_rows = tf.random_uniform((), minval=0, maxval=10, dtype=tf.int32)
    n_rows = tf.Print(n_rows, [n_rows])  # log the per-iteration row count
    anchor = tf.random_normal((n_rows, 4), dtype=tf.float64)
    inner_start = tf.zeros([], dtype=tf.int32)
    # Nested loop writes the rows one at a time so the outer TensorArray
    # only ever sees fixed-shape (4,) elements.
    _, ta = tf.while_loop(lambda k, arr: k < n_rows,
                          _row_writer(total_size, anchor),
                          (inner_start, ta))
    return i + 1, total_size + n_rows, ta

counter_init = tf.zeros([], dtype=tf.int32)
size_init = tf.zeros([], dtype=tf.int32)
storage = tf.TensorArray(tf.float64, size=0, dynamic_size=True)
_, _, anchors = tf.while_loop(lambda i, total_size, ta: i < 4,
                              loop_body,
                              (counter_init, size_init, storage))
anchors = anchors.stack()
anchors = tf.reshape(anchors, shape=(-1, 4))
anchors = tf.identity(anchors, name="anchors")

with tf.Session() as sess:
    result = sess.run(anchors)
    print("Final shape", result.shape)
    print(result)
这会打印如下内容:
[5]
[5]
[7]
[7]
Final shape (24, 4)
我假设 random_normal
出于某种原因必须在 while_loop
中处理。否则写起来会容易得多:
import tensorflow as tf

# Draw the four per-iteration row counts up front, then sample every row
# in one random_normal call sized by their sum — no loop needed.
row_counts = tf.random_uniform((4,), minval=0, maxval=10, dtype=tf.int32)
total_rows = tf.reduce_sum(row_counts)
anchors = tf.random_normal((total_rows, 4), dtype=tf.float64)

with tf.Session() as sess:
    result = sess.run(anchors)
    print("Final shape", result.shape)
    print(result)
我正在尝试在 tf.while_loop
中填充二维数组。问题在于我每次迭代计算出的结果返回的行数是可变的,而 TensorFlow 似乎不允许这样做。
查看这个重现问题的最小示例:
# Minimal reproduction of the failure: a TensorArray created with the
# default infer_shape=True requires every element written to share one
# static shape, so a per-iteration variable n_rows is rejected.
indices = tf.constant([2, 5, 7, 9])
num_elems = tf.shape(indices)[0]
init_array = tf.TensorArray(tf.float64, size=num_elems)
initial_i = tf.constant(0, dtype='int32')
# Loop body: writes a randomly sized (n_rows, 4) block into slot i.
def loop_body(i, ta):
# Here if I choose a random rows number, it fails.
n_rows = tf.random_uniform((), minval=0, maxval=10, dtype=tf.int64)
# It works with a fixed row number.
# n_rows = 2
anchor = tf.random_normal((n_rows, 4))
ta = ta.write(i, tf.cast(anchor, tf.float64))
return i+1, ta
_, anchors= tf.while_loop(lambda i, ta: i < num_elems, loop_body, [initial_i, init_array])
# stack() also assumes identical element shapes across the array.
anchors = anchors.stack()
anchors = tf.reshape(anchors, shape=(-1, 4))
anchors = tf.identity(anchors, name="anchors")
with tf.Session() as sess:
result = sess.run(anchors)
print(result)
它返回:
[[ 0.07496446 -0.32444516 -0.47164568 1.10953283]
[-0.78791034 1.87736523 0.99817699 0.45336106]
[-0.65860498 -1.1703862 -0.05761402 -0.17642537]
[ 0.49713874 1.01805222 0.60902107 0.85543454]
[-1.38755643 -0.70669901 0.34549037 -0.85984546]
[-1.32419562 0.71003789 0.34984082 -1.39001906]
[ 2.26691341 -0.63561141 0.38636214 0.02521387]
[-1.55348766 1.0176425 0.4889268 -0.12093868]]
我也愿意接受替代解决方案,以在每次迭代中使用可变行数填充循环中的张量。
这是一个嵌套的 while_loop
解决方案,它写入单个 TensorArray
:
import tensorflow as tf
# Factory returning an inner-loop body that writes anchor[j] into
# slot total_size + j, so the TensorArray only ever receives
# fixed-shape (4,) rows regardless of how many rows each outer
# iteration produces.
def make_inner_loop_body(total_size, anchor):
def _inner_loop_body(j, ta):
return j + 1, ta.write(total_size + j, anchor[j])
return _inner_loop_body
# Outer-loop body: draws a random row count, then appends that many
# random rows to ta via the nested while_loop above.
def loop_body(i, total_size, ta):
n_rows = tf.random_uniform((), minval=0, maxval=10, dtype=tf.int32)
# tf.Print logs the per-iteration row count as a side effect.
n_rows = tf.Print(n_rows, [n_rows])
anchor = tf.random_normal((n_rows, 4), dtype=tf.float64)
_, ta = tf.while_loop(lambda j, ta: j < n_rows,
make_inner_loop_body(total_size, anchor),
(tf.zeros([], dtype=tf.int32), ta))
return i+1, total_size + n_rows, ta
# dynamic_size=True lets the array grow to the (unknown) total row count.
_, _, anchors= tf.while_loop(lambda i, total_size, ta: i < 4,
loop_body,
(tf.zeros([], dtype=tf.int32),
tf.zeros([], dtype=tf.int32),
tf.TensorArray(tf.float64, size=0,
dynamic_size=True)))
anchors = anchors.stack()
anchors = tf.reshape(anchors, shape=(-1, 4))
anchors = tf.identity(anchors, name="anchors")
with tf.Session() as sess:
result = sess.run(anchors)
print("Final shape", result.shape)
print(result)
这会打印如下内容:
[5]
[5]
[7]
[7]
Final shape (24, 4)
我假设 random_normal
出于某种原因必须在 while_loop
中处理。否则写起来会容易得多:
import tensorflow as tf
# Simpler alternative when the sampling need not happen inside the loop:
# draw all four row counts at once, then sample the full batch of rows
# in a single random_normal call sized by their sum.
n_rows = tf.random_uniform((4,), minval=0, maxval=10, dtype=tf.int32)
anchors = tf.random_normal((tf.reduce_sum(n_rows), 4), dtype=tf.float64)
with tf.Session() as sess:
result = sess.run(anchors)
print("Final shape", result.shape)
print(result)