为什么惩罚不会改变 Keras 模型的预测?
Why does a penalty not change the predictions of a Keras model?
我最近在尝试实现自定义损失函数时遇到了这个问题。以下两个损失函数产生完全相同的结果,即使第二个损失函数在返回值中添加了一个较大的随机值,并且已在 jupyter notebook 中确保了可复现性。知道这是为什么吗?
def customLoss1():
    """Factory that builds a plain binary cross-entropy loss function.

    Returns:
        A Keras-compatible loss callable ``(y_true, y_pred) -> scalar``.
    """
    def binary_crossentropy1(y_true, y_pred):
        # BinaryCrossentropy already reduces to a scalar; K.mean is a no-op
        # on a scalar but kept for parity with the original code.
        bin_cross = tf.keras.losses.BinaryCrossentropy()
        bce = K.mean(bin_cross(y_true, y_pred))
        return bce
    return binary_crossentropy1
def customLoss2():
    """Factory that builds a BCE loss plus a large random offset.

    The random term (stddev=10) is meant to perturb the loss so the two
    custom losses can be told apart during training.

    Returns:
        A Keras-compatible loss callable ``(y_true, y_pred) -> scalar``.
    """
    def binary_crossentropy2(y_true, y_pred):
        # Scalar BCE plus a draw from N(0, 10^2); the offset is constant
        # w.r.t. y_pred, so it does not change the gradients.
        bin_cross = tf.keras.losses.BinaryCrossentropy()
        bce = K.mean(bin_cross(y_true, y_pred)) + tf.random.normal([], mean=0.0, stddev=10.0)
        return bce
    return binary_crossentropy2
你的错误一定是在别的地方,因为你发布的损失函数确实会产生不同的结果:
import tensorflow as tf
tf.random.set_seed(11)
def binary_crossentropy1(y_true, y_pred):
    """Mean binary cross-entropy, interpreting y_pred as raw logits.

    Args:
        y_true: Ground-truth labels (0/1).
        y_pred: Unscaled logits (from_logits=True applies sigmoid internally).

    Returns:
        A scalar float32 tensor with the BCE value.
    """
    bin_cross = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    bce = tf.keras.backend.mean(bin_cross(y_true, y_pred))
    return bce
def binary_crossentropy2(y_true, y_pred):
    """Mean binary cross-entropy on logits plus a large random offset.

    Args:
        y_true: Ground-truth labels (0/1).
        y_pred: Unscaled logits (from_logits=True applies sigmoid internally).

    Returns:
        A scalar float32 tensor: BCE + N(0, 10^2) sample, so the result
        differs from binary_crossentropy1 on the same inputs.
    """
    bin_cross = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    bce = tf.keras.backend.mean(bin_cross(y_true, y_pred)) + tf.random.normal([], mean=0.0, stddev=10.0)
    return bce
# Demo: the two losses produce different values on the same inputs.
labels = tf.constant([0, 1, 0, 0])
logits = tf.constant([-18.6, 0.51, 2.94, -12.8])
print(binary_crossentropy1(labels, logits))
print(binary_crossentropy2(labels, logits))
tf.Tensor(0.865458, shape=(), dtype=float32)
tf.Tensor(-14.364014, shape=(), dtype=float32)
我最近在尝试实现自定义损失函数时遇到了这个问题。以下两个损失函数产生完全相同的结果,即使第二个损失函数在返回值中添加了一个较大的随机值,并且已在 jupyter notebook 中确保了可复现性。知道这是为什么吗?
def customLoss1():
    """Factory that builds a plain binary cross-entropy loss function.

    Returns:
        A Keras-compatible loss callable ``(y_true, y_pred) -> scalar``.
    """
    def binary_crossentropy1(y_true, y_pred):
        # BinaryCrossentropy already reduces to a scalar; K.mean is a no-op
        # on a scalar but kept for parity with the original code.
        bin_cross = tf.keras.losses.BinaryCrossentropy()
        bce = K.mean(bin_cross(y_true, y_pred))
        return bce
    return binary_crossentropy1
def customLoss2():
    """Factory that builds a BCE loss plus a large random offset.

    The random term (stddev=10) is meant to perturb the loss so the two
    custom losses can be told apart during training.

    Returns:
        A Keras-compatible loss callable ``(y_true, y_pred) -> scalar``.
    """
    def binary_crossentropy2(y_true, y_pred):
        # Scalar BCE plus a draw from N(0, 10^2); the offset is constant
        # w.r.t. y_pred, so it does not change the gradients.
        bin_cross = tf.keras.losses.BinaryCrossentropy()
        bce = K.mean(bin_cross(y_true, y_pred)) + tf.random.normal([], mean=0.0, stddev=10.0)
        return bce
    return binary_crossentropy2
你的错误一定是在别的地方,因为你发布的损失函数确实会产生不同的结果:
import tensorflow as tf
tf.random.set_seed(11)
def binary_crossentropy1(y_true, y_pred):
    """Mean binary cross-entropy, interpreting y_pred as raw logits.

    Args:
        y_true: Ground-truth labels (0/1).
        y_pred: Unscaled logits (from_logits=True applies sigmoid internally).

    Returns:
        A scalar float32 tensor with the BCE value.
    """
    bin_cross = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    bce = tf.keras.backend.mean(bin_cross(y_true, y_pred))
    return bce
def binary_crossentropy2(y_true, y_pred):
    """Mean binary cross-entropy on logits plus a large random offset.

    Args:
        y_true: Ground-truth labels (0/1).
        y_pred: Unscaled logits (from_logits=True applies sigmoid internally).

    Returns:
        A scalar float32 tensor: BCE + N(0, 10^2) sample, so the result
        differs from binary_crossentropy1 on the same inputs.
    """
    bin_cross = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    bce = tf.keras.backend.mean(bin_cross(y_true, y_pred)) + tf.random.normal([], mean=0.0, stddev=10.0)
    return bce
# Demo: the two losses produce different values on the same inputs.
labels = tf.constant([0, 1, 0, 0])
logits = tf.constant([-18.6, 0.51, 2.94, -12.8])
print(binary_crossentropy1(labels, logits))
print(binary_crossentropy2(labels, logits))
tf.Tensor(0.865458, shape=(), dtype=float32)
tf.Tensor(-14.364014, shape=(), dtype=float32)