eager execution 函数的输入不能是 Keras 符号张量
Inputs to eager execution function cannot be Keras symbolic tensors
我正在尝试在 tf.Keras
(TensorFlow 2.0.0rc0) 中为具有稀疏标注数据的 3-D U-Net 实现与样本和像素相关的损失加权(Çiçek 2016,arXiv:1606.06650)。
这是我的代码:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
# disabling eager execution makes this example work:
# tf.python.framework_ops.disable_eager_execution()
def get_loss_fcn(w):
    """Return an MSE loss closure scaled elementwise by the tensor ``w``.

    ``w`` is captured by the closure; here it is a Keras symbolic input
    tensor, which is exactly what triggers the eager-execution error.
    """
    def weighted_mse(y_true, y_pred):
        return w * losses.mse(y_true, y_pred)
    return weighted_mse
data_x = np.random.rand(5, 4, 1)  # model input
data_w = np.random.rand(5, 4)     # per-sample/per-pixel loss weights
data_y = np.random.rand(5, 4, 1)  # targets
x = layers.Input([4, 1])
w = layers.Input([4])  # weights fed in as a second model input
y = layers.Activation('tanh')(x)
model = models.Model(inputs=[x, w], outputs=y)
# Closing the loss over the symbolic input tensor model.input[1] is what
# raises the _SymbolicException under eager execution (traceback below).
loss = get_loss_fcn(model.input[1])
# using another loss makes it work, too:
# loss = 'mse'
model.compile(loss=loss)
model.fit((data_x, data_w), data_y)
print('Done.')
这在禁用即时执行时运行良好,但 TensorFlow 2 的要点之一是默认具有即时执行。我和那个目标之间的障碍是自定义损失函数,如您所见(使用 'mse'
作为损失也消除了该错误):
File "MWE.py", line 30, in <module>
model.fit((data_x, data_w), data_y)
[...]
tensorflow.python.eager.core._SymbolicException: Inputs to eager execution function cannot be Keras symbolic tensors, but found [<tf.Tensor 'input_2:0' shape=(None, 4) dtype=float32>]
如何使这种结构与急切执行一起工作?
我的一个想法是把 w
拼接到输出 y
上,然后在损失函数中把 y_pred
重新拆分成原始的 y_pred
和 w
,但这是我想避免的权宜之计(hack)。不过它确实可行,见下面标有 # HERE
的行:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
# HERE
def loss_fcn(y_true, y_pred):
    """Unpack the weights from y_pred's last channel, then weight the MSE."""
    weights = y_pred[:, :, -1]      # HERE: last channel carries the weights
    prediction = y_pred[:, :, :-1]  # HERE: the rest is the real prediction
    return weights * losses.mse(y_true, prediction)
data_x = np.random.rand(5, 4, 1)
data_w = np.random.rand(5, 4, 1)  # HERE: weights now match y's rank
data_y = np.random.rand(5, 4, 1)
x = layers.Input([4, 1])
w = layers.Input([4, 1])  # HERE
y = layers.Activation('tanh')(x)
# Smuggle the weights to the loss by concatenating them onto the output.
output = layers.Concatenate()([y, w])  # HERE
model = models.Model(inputs=[x, w], outputs=output)  # HERE
loss = loss_fcn  # HERE: plain function, no symbolic tensor captured
model.compile(loss=loss)
model.fit((data_x, data_w), data_y)
print('Done.')
还有其他想法吗?
另一种解决方案是将权重作为附加输出特征而不是输入特征传递。
这使模型完全没有任何与权重相关的东西,并且权重仅出现在损失函数和 .fit()
调用中:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
data_x = 2 * np.ones((7, 11, 15, 3), dtype=float)  # constant inputs (mean 2)
data_y = 5 * np.ones((7, 9, 13, 5), dtype=float)   # constant targets (mean 5)
x = layers.Input(data_x.shape[1:])
# Conv2D output spatial shape (9, 13) matches data_y's shape.
y = layers.Conv2D(5, kernel_size=3)(x)
model = models.Model(inputs=x, outputs=y)
def loss(y_true, y_pred):
    """Weighted MSE; the last channel of y_true holds the per-pixel weights."""
    targets, weights = tf.split(y_true, num_or_size_splits=[-1, 1], axis=-1)
    result = tf.squeeze(weights, axis=-1) * losses.mse(targets, y_pred)
    # Sanity prints: with the constant fixtures these should read 5 and 3.
    tf.print(tf.math.reduce_mean(targets), "== 5")
    tf.print(tf.math.reduce_mean(weights), "== 3")
    return result
model.compile(loss=loss)
data_w = 3 * np.ones((7, 9, 13, 1), dtype=float)  # weights (mean 3)
# Pack the weights as the final channel of the target tensor.
data_yw = np.concatenate((data_y, data_w), axis=-1)
model.fit(data_x, data_yw)
一个遗留的缺点是,用 numpy.concatenate()
合并 y
和 w
时需要在内存中操作(可能很大的)数组,因此更偏向 TensorFlow 风格的做法会更受欢迎。
另一种方式:
from tensorflow.keras import layers, models, losses
import numpy as np
def loss_fcn(y_true, y_pred, w):
    """Return the elementwise product of ``w`` and MSE(y_true, y_pred)."""
    return w * losses.mse(y_true, y_pred)
data_x = np.random.rand(5, 4, 1)
data_w = np.random.rand(5, 4)
data_y = np.random.rand(5, 4, 1)
x = layers.Input([4, 1])
y_true = layers.Input([4, 1])  # ground truth passed as an extra model input
w = layers.Input([4])          # weights passed as an extra model input
y = layers.Activation('tanh')(x)
model = models.Model(inputs=[x, y_true, w], outputs=y)
# add_loss attaches the weighted loss symbolically, so no custom compile loss
# is needed. Note the argument order maps loss_fcn(y_true=y, y_pred=y_true, w=w);
# MSE is symmetric in its first two arguments, so the value is unchanged.
model.add_loss(loss_fcn(y, y_true, w))
model.compile()
# All three arrays are inputs now; no separate target is passed.
model.fit((data_x, data_y, data_w))
我认为这是最优雅的解决方案。
如果你把 fit 那一行替换为
model.fit((data_x, data_y, data_w))
,你的代码在最新的 TensorFlow (2.3) 上运行得很好。
所以:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
# HERE
def loss_fcn(y_true, y_pred):
    """Split the concatenated weights off y_pred, then weight the MSE."""
    weight_part = y_pred[:, :, -1]   # HERE
    pred_part = y_pred[:, :, :-1]    # HERE
    return weight_part * losses.mse(y_true, pred_part)
data_x = np.random.rand(5, 4, 1)
data_w = np.random.rand(5, 4, 1)  # HERE
data_y = np.random.rand(5, 4, 1)
x = layers.Input([4, 1])
w = layers.Input([4, 1])  # HERE
y = layers.Activation('tanh')(x)
output = layers.Concatenate()([y, w])  # HERE
model = models.Model(inputs=[x, w], outputs=output)  # HERE
loss = loss_fcn  # HERE
model.compile(loss=loss)
# NOTE(review): this model takes two inputs [x, w] plus a separate target;
# a single 3-tuple here does not match that signature — the replaced fit line
# appears to belong to the add_loss variant instead. Confirm before reuse.
model.fit((data_x, data_y, data_w))
print('Done.')
此外,我发现在损失函数中使用 tf.reduce_mean、K.mean、tf.square、tf.exp 等也会导致同样的错误。
我正在尝试在 tf.Keras
(TensorFlow 2.0.0rc0) 中为具有稀疏标注数据的 3-D U-Net 实现与样本和像素相关的损失加权(Çiçek 2016,arXiv:1606.06650)。
这是我的代码:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
# disabling eager execution makes this example work:
# tf.python.framework_ops.disable_eager_execution()
def get_loss_fcn(w):
    """Build an MSE loss scaled elementwise by the captured tensor ``w``."""
    def weighted_mse(y_true, y_pred):
        return w * losses.mse(y_true, y_pred)
    return weighted_mse
data_x = np.random.rand(5, 4, 1)  # model input
data_w = np.random.rand(5, 4)     # per-sample/per-pixel loss weights
data_y = np.random.rand(5, 4, 1)  # targets
x = layers.Input([4, 1])
w = layers.Input([4])  # weights fed in as a second model input
y = layers.Activation('tanh')(x)
model = models.Model(inputs=[x, w], outputs=y)
# Closing the loss over the symbolic input tensor model.input[1] is what
# raises the _SymbolicException under eager execution (traceback below).
loss = get_loss_fcn(model.input[1])
# using another loss makes it work, too:
# loss = 'mse'
model.compile(loss=loss)
model.fit((data_x, data_w), data_y)
print('Done.')
这在禁用即时执行时运行良好,但 TensorFlow 2 的要点之一是默认具有即时执行。我和那个目标之间的障碍是自定义损失函数,如您所见(使用 'mse'
作为损失也消除了该错误):
File "MWE.py", line 30, in <module>
model.fit((data_x, data_w), data_y)
[...]
tensorflow.python.eager.core._SymbolicException: Inputs to eager execution function cannot be Keras symbolic tensors, but found [<tf.Tensor 'input_2:0' shape=(None, 4) dtype=float32>]
如何使这种结构与急切执行一起工作?
我的一个想法是把 w
拼接到输出 y
上,然后在损失函数中把 y_pred
重新拆分成原始的 y_pred
和 w
,但这是我想避免的权宜之计(hack)。不过它确实可行,见下面标有 # HERE
的行:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
# HERE
def loss_fcn(y_true, y_pred):
    """Unpack the weights from y_pred's last channel, then weight the MSE."""
    weights = y_pred[:, :, -1]      # HERE
    prediction = y_pred[:, :, :-1]  # HERE
    return weights * losses.mse(y_true, prediction)
data_x = np.random.rand(5, 4, 1)
data_w = np.random.rand(5, 4, 1)  # HERE: weights now match y's rank
data_y = np.random.rand(5, 4, 1)
x = layers.Input([4, 1])
w = layers.Input([4, 1])  # HERE
y = layers.Activation('tanh')(x)
# Smuggle the weights to the loss by concatenating them onto the output.
output = layers.Concatenate()([y, w])  # HERE
model = models.Model(inputs=[x, w], outputs=output)  # HERE
loss = loss_fcn  # HERE: plain function, no symbolic tensor captured
model.compile(loss=loss)
model.fit((data_x, data_w), data_y)
print('Done.')
还有其他想法吗?
另一种解决方案是将权重作为附加输出特征而不是输入特征传递。
这使模型完全没有任何与权重相关的东西,并且权重仅出现在损失函数和 .fit()
调用中:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
data_x = 2 * np.ones((7, 11, 15, 3), dtype=float)  # constant inputs (mean 2)
data_y = 5 * np.ones((7, 9, 13, 5), dtype=float)   # constant targets (mean 5)
x = layers.Input(data_x.shape[1:])
# Conv2D output spatial shape (9, 13) matches data_y's shape.
y = layers.Conv2D(5, kernel_size=3)(x)
model = models.Model(inputs=x, outputs=y)
def loss(y_true, y_pred):
    """Weighted MSE; the last channel of y_true carries the per-pixel weights."""
    (y_true, w) = tf.split(y_true, num_or_size_splits=[-1, 1], axis=-1)
    loss = tf.squeeze(w, axis=-1) * losses.mse(y_true, y_pred)
    # Sanity prints: with the constant fixtures these should read 5 and 3.
    tf.print(tf.math.reduce_mean(y_true), "== 5")
    tf.print(tf.math.reduce_mean(w), "== 3")
    return loss
model.compile(loss=loss)
data_w = 3 * np.ones((7, 9, 13, 1), dtype=float)  # weights (mean 3)
# Pack the weights as the final channel of the target tensor.
data_yw = np.concatenate((data_y, data_w), axis=-1)
model.fit(data_x, data_yw)
一个遗留的缺点是,用 numpy.concatenate()
合并 y
和 w
时需要在内存中操作(可能很大的)数组,因此更偏向 TensorFlow 风格的做法会更受欢迎。
另一种方式:
from tensorflow.keras import layers, models, losses
import numpy as np
def loss_fcn(y_true, y_pred, w):
    """MSE weighted elementwise by ``w`` (all three are symbolic tensors here)."""
    loss = w * losses.mse(y_true, y_pred)
    return loss
data_x = np.random.rand(5, 4, 1)
data_w = np.random.rand(5, 4)
data_y = np.random.rand(5, 4, 1)
x = layers.Input([4, 1])
y_true = layers.Input([4, 1])  # ground truth passed as an extra model input
w = layers.Input([4])          # weights passed as an extra model input
y = layers.Activation('tanh')(x)
model = models.Model(inputs=[x, y_true, w], outputs=y)
# add_loss attaches the weighted loss symbolically; the call maps
# loss_fcn(y_true=y, y_pred=y_true, w=w) — MSE is symmetric in its first two
# arguments, so the value is unchanged.
model.add_loss(loss_fcn(y, y_true, w))
model.compile()
# All three arrays are inputs now; no separate target is passed.
model.fit((data_x, data_y, data_w))
我认为这是最优雅的解决方案。
如果你把 fit 那一行替换为
model.fit((data_x, data_y, data_w))
,你的代码在最新的 TensorFlow (2.3) 上运行得很好。
所以:
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, models
# HERE
def loss_fcn(y_true, y_pred):
    """Weighted MSE: weights travel concatenated onto y_pred's last channel."""
    w = y_pred[:, :, -1]  # HERE
    y_pred = y_pred[:, :, :-1]  # HERE
    loss = w * losses.mse(y_true, y_pred)
    return loss
data_x = np.random.rand(5, 4, 1)
data_w = np.random.rand(5, 4, 1)  # HERE
data_y = np.random.rand(5, 4, 1)
x = layers.Input([4, 1])
w = layers.Input([4, 1])  # HERE
y = layers.Activation('tanh')(x)
output = layers.Concatenate()([y, w])  # HERE
model = models.Model(inputs=[x, w], outputs=output)  # HERE
loss = loss_fcn  # HERE
model.compile(loss=loss)
# NOTE(review): this model takes two inputs [x, w] plus a separate target;
# a single 3-tuple here does not match that signature — the replaced fit line
# appears to belong to the add_loss variant instead. Confirm before reuse.
model.fit((data_x, data_y, data_w))
print('Done.')
此外,我发现在损失函数中使用 tf.reduce_mean、K.mean、tf.square、tf.exp 等也会导致同样的错误。