TensorFlow Sigmoid 交叉熵与一维数据的 Logits
TensorFlow Sigmoid Cross Entropy with Logits for 1D data
上下文
假设我们有一些一维数据(例如时间序列),其中所有序列的长度都是固定的l:
# [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] index
example = [ 0, 1, 1, 0, 23, 22, 20, 14, 9, 2, 0, 0] # l = 12
我们要进行语义分割,用n 类:
# [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] index
labeled = [
[ 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # class 1
[ 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], # class 2
[ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0], # class 3
#[ ... ],
[ 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1], # class n
]
那么单个示例的输出具有 [n, l]
的形状(即 data_format
不是 "channels_last"
)并且批处理输出具有 [b, n, l]
的形状,其中 b
是批次中的示例数。
这些类是独立的,所以我的理解是使用sigmoid交叉熵作为损失而不是softmax交叉熵适用于此。
问题
关于 tf.nn.sigmoid_cross_entropy_with_logits
的预期格式和使用,我有一些小的相关问题:
由于网络输出的张量与批处理标签的形状相同,我应该在它输出 logits 的假设下训练网络,还是采用 keras 方法(参见 keras 的 binary_crossentropy
) 并假设它输出概率?
考虑到 1d 分割问题,我应该调用 tf.nn.sigmoid_cross_entropy_with_logits
吗:
data_format='channels_first'
(如上图),或者
data_format='channels_last'
(example.T)
如果我想为每个频道单独分配标签?
传给优化器的loss操作应该是:
tf.nn.sigmoid_cross_entropy_with_logits(labels, logits)
、
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels, logits))
,或
tf.losses.sigmoid_cross_entropy
?
代码
这个 Colab 突出了我的困惑,并证明了 data_format
实际上很重要......,但文档没有明确说明这是预期的。
虚拟数据
# Dummy data for comparing the loss APIs in both data_format layouts.
c = 5  # number of channels (label classes)
p = 10  # number of positions ('pixels')
# data_format = 'channels_first', shape = [classes, pixels]
# 'logits' for 2 examples: uniform floats in [0, 1), float64.
# (vectorized np.random replaces the per-element random.random() loops;
# the redundant .astype(float) is dropped — np.random.random is float64 already)
pred_1 = np.random.random((c, p))
pred_2 = np.random.random((c, p))
# 'ground truth' for the above 2 examples: ~20% positive labels, as 0./1. floats
targ_1 = (np.random.random((c, p)) >= 0.8).astype(float)
targ_2 = (np.random.random((c, p)) >= 0.8).astype(float)
# batched form of the above examples, shape = [batch, classes, pixels]
preds = np.stack([pred_1, pred_2])
targs = np.stack([targ_1, targ_2])
# data_format = 'channels_last', shape = [pixels, classes]
t_pred_1 = pred_1.T
t_pred_2 = pred_2.T
t_targ_1 = targ_1.T
t_targ_2 = targ_2.T
t_preds = np.stack([t_pred_1, t_pred_2])
t_targs = np.stack([t_targ_1, t_targ_2])
损失
tf.nn
# Element-wise sigmoid cross-entropy via tf.nn, for both layouts.
# Per-example and batched losses, 'channels_first' ([classes, pixels]).
loss_1 = tf.nn.sigmoid_cross_entropy_with_logits(labels=targ_1, logits=pred_1)
loss_2 = tf.nn.sigmoid_cross_entropy_with_logits(labels=targ_2, logits=pred_2)
b_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targs, logits=preds)
# Per-example and batched losses, 'channels_last' ([pixels, classes]).
t_loss_1 = tf.nn.sigmoid_cross_entropy_with_logits(labels=t_targ_1, logits=t_pred_1)
t_loss_2 = tf.nn.sigmoid_cross_entropy_with_logits(labels=t_targ_2, logits=t_pred_2)
t_b_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=t_targs, logits=t_preds)
# Evaluate all six tensors in a single Session.run call.
with tf.Session() as sess:
    l1, l2, bl, t_l1, t_l2, t_bl = sess.run(
        [loss_1, loss_2, b_loss, t_loss_1, t_loss_2, t_b_loss])
tf.reduce_mean(tf.nn)
# Scalar losses: mean-reduce the element-wise tf.nn results above.
# 'channels_first' — per example and batched.
rm_loss_1, rm_loss_2, rm_b_loss = (
    tf.reduce_mean(t) for t in (loss_1, loss_2, b_loss))
# 'channels_last' — per example and batched.
rm_t_loss_1, rm_t_loss_2, rm_t_b_loss = (
    tf.reduce_mean(t) for t in (t_loss_1, t_loss_2, t_b_loss))
# Evaluate all six scalars in a single Session.run call.
with tf.Session() as sess:
    rm_l1, rm_l2, rm_bl, rm_t_l1, rm_t_l2, rm_t_bl = sess.run(
        [rm_loss_1, rm_loss_2, rm_b_loss,
         rm_t_loss_1, rm_t_loss_2, rm_t_b_loss])
tf.losses
# Scalar losses via tf.losses.sigmoid_cross_entropy, which applies a
# mean reduction by default (Reduction.SUM_BY_NONZERO_WEIGHTS with unit weights).
# 'channels_first' — per example and batched.
tf_loss_1 = tf.losses.sigmoid_cross_entropy(multi_class_labels=targ_1, logits=pred_1)
tf_loss_2 = tf.losses.sigmoid_cross_entropy(multi_class_labels=targ_2, logits=pred_2)
tf_b_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=targs, logits=preds)
# 'channels_last' — per example and batched.
tf_t_loss_1 = tf.losses.sigmoid_cross_entropy(multi_class_labels=t_targ_1, logits=t_pred_1)
tf_t_loss_2 = tf.losses.sigmoid_cross_entropy(multi_class_labels=t_targ_2, logits=t_pred_2)
tf_t_b_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=t_targs, logits=t_preds)
# Evaluate all six scalars in a single Session.run call.
with tf.Session() as sess:
    tf_l1, tf_l2, tf_bl, tf_t_l1, tf_t_l2, tf_t_bl = sess.run(
        [tf_loss_1, tf_loss_2, tf_b_loss,
         tf_t_loss_1, tf_t_loss_2, tf_t_b_loss])
测试等效性
data_format等值
# NOTE(review): these checks use exact float equality (==). The two layouts
# run the same math over differently-shaped tensors, so bit-identical results
# are not guaranteed — which is why the flags below flip between runs.
# This is the flawed version the answer corrects with np.isclose.
# loss _should_(?) be the same for 'channels_first' and 'channels_last' data_format
# test example_1
e1 = (l1 == t_l1.T).all()
# test example 2
e2 = (l2 == t_l2.T).all()
# loss calculated for each example and then batched together should be the same
# as the loss calculated on the batched examples
ea = (np.array([l1, l2]) == bl).all()
t_ea = (np.array([t_l1, t_l2]) == t_bl).all()
# loss calculated on the batched examples for 'channels_first' should be the same
# as loss calculated on the batched examples for 'channels_last'
eb = (bl == np.transpose(t_bl, (0, 2, 1))).all()
e1, e2, ea, t_ea, eb
# (True, False, False, False, True) <- changes every time, so True is happenstance
tf.reduce_mean 和 tf.losses
之间的等效性
# NOTE(review): exact == between floats produced by two different reduction
# implementations (tf.losses' internal mean vs an explicit tf.reduce_mean)
# almost always differs in the last bits — compare with np.isclose instead,
# as the answer below does.
l_e1 = tf_l1 == rm_l1
l_e2 = tf_l2 == rm_l2
l_eb = tf_bl == rm_bl
l_t_e1 = tf_t_l1 == rm_t_l1
l_t_e2 = tf_t_l2 == rm_t_l2
l_t_eb = tf_t_bl == rm_t_bl
l_e1, l_e2, l_eb, l_t_e1, l_t_e2, l_t_eb
# (False, False, False, False, False, False)
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(...))
和 tf.losses.sigmoid_cross_entropy(...)
(使用默认参数)都在计算相同的东西。问题出在您使用 ==
比较两个浮点数的测试中。相反,使用 np.isclose
方法来检查两个浮点数是否相等:
# Same equivalence checks, but with a float-tolerant comparison.
# np.allclose(a, b) is documented as equivalent to np.isclose(a, b).all().
# loss is the same for 'channels_first' and 'channels_last' data_format:
# per-example, transposed back to the same layout
e1 = np.allclose(l1, t_l1.T)
e2 = np.allclose(l2, t_l2.T)
# per-example losses stacked together match the loss of the stacked batch
ea = np.allclose(np.array([l1, l2]), bl)
t_ea = np.allclose(np.array([t_l1, t_l2]), t_bl)
# batched 'channels_first' matches the transposed batched 'channels_last'
eb = np.allclose(bl, np.transpose(t_bl, (0, 2, 1)))
e1, e2, ea, t_ea, eb
# (True, True, True, True, True)
并且:
# Tolerant comparison of the tf.losses scalars against the explicit
# tf.reduce_mean scalars — one np.isclose per (tf.losses, reduce_mean) pair.
pairs = [
    (tf_l1, rm_l1), (tf_l2, rm_l2), (tf_bl, rm_bl),
    (tf_t_l1, rm_t_l1), (tf_t_l2, rm_t_l2), (tf_t_bl, rm_t_bl),
]
l_e1, l_e2, l_eb, l_t_e1, l_t_e2, l_t_eb = (
    np.isclose(a, b) for a, b in pairs)
l_e1, l_e2, l_eb, l_t_e1, l_t_e2, l_t_eb
# (True, True, True, True, True, True)
上下文
假设我们有一些一维数据(例如时间序列),其中所有序列的长度都是固定的l:
# [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] index
example = [ 0, 1, 1, 0, 23, 22, 20, 14, 9, 2, 0, 0] # l = 12
我们要进行语义分割,用n 类:
# [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] index
labeled = [
[ 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # class 1
[ 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], # class 2
[ 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0], # class 3
#[ ... ],
[ 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1], # class n
]
那么单个示例的输出具有 [n, l]
的形状(即 data_format
不是 "channels_last"
)并且批处理输出具有 [b, n, l]
的形状,其中 b
是批次中的示例数。
这些类是独立的,所以我的理解是使用sigmoid交叉熵作为损失而不是softmax交叉熵适用于此。
问题
关于 tf.nn.sigmoid_cross_entropy_with_logits
的预期格式和使用,我有一些小的相关问题:
由于网络输出的张量与批处理标签的形状相同,我应该在它输出 logits 的假设下训练网络,还是采用 keras 方法(参见 keras 的
binary_crossentropy
) 并假设它输出概率?考虑到 1d 分割问题,我应该调用
tf.nn.sigmoid_cross_entropy_with_logits
吗:data_format='channels_first'
(如上图),或者data_format='channels_last'
(example.T)
如果我想为每个频道单独分配标签?
传给优化器的loss操作应该是:
tf.nn.sigmoid_cross_entropy_with_logits(labels, logits)
、tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels, logits))
,或tf.losses.sigmoid_cross_entropy
?
代码
这个 Colab 突出了我的困惑,并证明了 data_format
实际上很重要......,但文档没有明确说明这是预期的。
虚拟数据
# Dummy data for comparing the loss APIs in both data_format layouts.
c = 5  # number of channels (label classes)
p = 10  # number of positions ('pixels')
# data_format = 'channels_first', shape = [classes, pixels]
# 'logits' for 2 examples: uniform floats in [0, 1), float64.
# (vectorized np.random replaces the per-element random.random() loops;
# the redundant .astype(float) is dropped — np.random.random is float64 already)
pred_1 = np.random.random((c, p))
pred_2 = np.random.random((c, p))
# 'ground truth' for the above 2 examples: ~20% positive labels, as 0./1. floats
targ_1 = (np.random.random((c, p)) >= 0.8).astype(float)
targ_2 = (np.random.random((c, p)) >= 0.8).astype(float)
# batched form of the above examples, shape = [batch, classes, pixels]
preds = np.stack([pred_1, pred_2])
targs = np.stack([targ_1, targ_2])
# data_format = 'channels_last', shape = [pixels, classes]
t_pred_1 = pred_1.T
t_pred_2 = pred_2.T
t_targ_1 = targ_1.T
t_targ_2 = targ_2.T
t_preds = np.stack([t_pred_1, t_pred_2])
t_targs = np.stack([t_targ_1, t_targ_2])
损失
tf.nn
# Element-wise sigmoid cross-entropy via tf.nn, for both layouts.
# Per-example and batched losses, 'channels_first' ([classes, pixels]).
loss_1 = tf.nn.sigmoid_cross_entropy_with_logits(labels=targ_1, logits=pred_1)
loss_2 = tf.nn.sigmoid_cross_entropy_with_logits(labels=targ_2, logits=pred_2)
b_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targs, logits=preds)
# Per-example and batched losses, 'channels_last' ([pixels, classes]).
t_loss_1 = tf.nn.sigmoid_cross_entropy_with_logits(labels=t_targ_1, logits=t_pred_1)
t_loss_2 = tf.nn.sigmoid_cross_entropy_with_logits(labels=t_targ_2, logits=t_pred_2)
t_b_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=t_targs, logits=t_preds)
# Evaluate all six tensors in a single Session.run call.
with tf.Session() as sess:
    l1, l2, bl, t_l1, t_l2, t_bl = sess.run(
        [loss_1, loss_2, b_loss, t_loss_1, t_loss_2, t_b_loss])
tf.reduce_mean(tf.nn)
# Scalar losses: mean-reduce the element-wise tf.nn results above.
# 'channels_first' — per example and batched.
rm_loss_1, rm_loss_2, rm_b_loss = (
    tf.reduce_mean(t) for t in (loss_1, loss_2, b_loss))
# 'channels_last' — per example and batched.
rm_t_loss_1, rm_t_loss_2, rm_t_b_loss = (
    tf.reduce_mean(t) for t in (t_loss_1, t_loss_2, t_b_loss))
# Evaluate all six scalars in a single Session.run call.
with tf.Session() as sess:
    rm_l1, rm_l2, rm_bl, rm_t_l1, rm_t_l2, rm_t_bl = sess.run(
        [rm_loss_1, rm_loss_2, rm_b_loss,
         rm_t_loss_1, rm_t_loss_2, rm_t_b_loss])
tf.losses
# Scalar losses via tf.losses.sigmoid_cross_entropy, which applies a
# mean reduction by default (Reduction.SUM_BY_NONZERO_WEIGHTS with unit weights).
# 'channels_first' — per example and batched.
tf_loss_1 = tf.losses.sigmoid_cross_entropy(multi_class_labels=targ_1, logits=pred_1)
tf_loss_2 = tf.losses.sigmoid_cross_entropy(multi_class_labels=targ_2, logits=pred_2)
tf_b_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=targs, logits=preds)
# 'channels_last' — per example and batched.
tf_t_loss_1 = tf.losses.sigmoid_cross_entropy(multi_class_labels=t_targ_1, logits=t_pred_1)
tf_t_loss_2 = tf.losses.sigmoid_cross_entropy(multi_class_labels=t_targ_2, logits=t_pred_2)
tf_t_b_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=t_targs, logits=t_preds)
# Evaluate all six scalars in a single Session.run call.
with tf.Session() as sess:
    tf_l1, tf_l2, tf_bl, tf_t_l1, tf_t_l2, tf_t_bl = sess.run(
        [tf_loss_1, tf_loss_2, tf_b_loss,
         tf_t_loss_1, tf_t_loss_2, tf_t_b_loss])
测试等效性
data_format等值
# NOTE(review): these checks use exact float equality (==). The two layouts
# run the same math over differently-shaped tensors, so bit-identical results
# are not guaranteed — which is why the flags below flip between runs.
# This is the flawed version the answer corrects with np.isclose.
# loss _should_(?) be the same for 'channels_first' and 'channels_last' data_format
# test example_1
e1 = (l1 == t_l1.T).all()
# test example 2
e2 = (l2 == t_l2.T).all()
# loss calculated for each example and then batched together should be the same
# as the loss calculated on the batched examples
ea = (np.array([l1, l2]) == bl).all()
t_ea = (np.array([t_l1, t_l2]) == t_bl).all()
# loss calculated on the batched examples for 'channels_first' should be the same
# as loss calculated on the batched examples for 'channels_last'
eb = (bl == np.transpose(t_bl, (0, 2, 1))).all()
e1, e2, ea, t_ea, eb
# (True, False, False, False, True) <- changes every time, so True is happenstance
tf.reduce_mean 和 tf.losses 之间的等效性
l_e1 = tf_l1 == rm_l1
# NOTE(review): exact == between floats produced by two different reduction
# implementations (tf.losses' internal mean vs an explicit tf.reduce_mean)
# almost always differs in the last bits — compare with np.isclose instead,
# as the answer below does.
l_e2 = tf_l2 == rm_l2
l_eb = tf_bl == rm_bl
l_t_e1 = tf_t_l1 == rm_t_l1
l_t_e2 = tf_t_l2 == rm_t_l2
l_t_eb = tf_t_bl == rm_t_bl
l_e1, l_e2, l_eb, l_t_e1, l_t_e2, l_t_eb
# (False, False, False, False, False, False)
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(...))
和 tf.losses.sigmoid_cross_entropy(...)
(使用默认参数)都在计算相同的东西。问题出在您使用 ==
比较两个浮点数的测试中。相反,使用 np.isclose
方法来检查两个浮点数是否相等:
# Same equivalence checks, but with a float-tolerant comparison.
# np.allclose(a, b) is documented as equivalent to np.isclose(a, b).all().
# loss is the same for 'channels_first' and 'channels_last' data_format:
# per-example, transposed back to the same layout
e1 = np.allclose(l1, t_l1.T)
e2 = np.allclose(l2, t_l2.T)
# per-example losses stacked together match the loss of the stacked batch
ea = np.allclose(np.array([l1, l2]), bl)
t_ea = np.allclose(np.array([t_l1, t_l2]), t_bl)
# batched 'channels_first' matches the transposed batched 'channels_last'
eb = np.allclose(bl, np.transpose(t_bl, (0, 2, 1)))
e1, e2, ea, t_ea, eb
# (True, True, True, True, True)
并且:
# Tolerant comparison of the tf.losses scalars against the explicit
# tf.reduce_mean scalars — one np.isclose per (tf.losses, reduce_mean) pair.
pairs = [
    (tf_l1, rm_l1), (tf_l2, rm_l2), (tf_bl, rm_bl),
    (tf_t_l1, rm_t_l1), (tf_t_l2, rm_t_l2), (tf_t_bl, rm_t_bl),
]
l_e1, l_e2, l_eb, l_t_e1, l_t_e2, l_t_eb = (
    np.isclose(a, b) for a, b in pairs)
l_e1, l_e2, l_eb, l_t_e1, l_t_e2, l_t_eb
# (True, True, True, True, True, True)