KerasTensor和Tensor如何处理?
How to deal with KerasTensor and Tensor?
我正在尝试创建变分自动编码器,这意味着我需要自定义损失函数。问题是在损失函数内部我有两种不同的损失——MSE 和 KL 散度。MSE 是 Tensor,而 KL 散度是 KerasTensor(因为 sigma 和 mu 是从编码器中输出的)。我收到这样的错误:
TypeError: Cannot convert a symbolic Keras input/output to a numpy
array. This error may indicate that you're trying to pass a symbolic
value to a NumPy call, which is not supported. Or, you may be trying
to pass Keras symbolic inputs/outputs to a TF API that does not
register dispatching, preventing Keras from automatically converting
the API call to a lambda layer in the Functional Model.
这是我的架构:
import tensorflow.keras as keras
from keras.layers import Input, Dense, Flatten, Reshape
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose
from keras.models import Model
import tensorflow as tf
import keras.backend as K
encoded_dim = 2  # dimensionality of the latent space

class Sampling(keras.layers.Layer):
    """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""

    def call(self, inputs):
        # Reparameterization trick: z = mean + exp(0.5 * log_var) * eps,
        # with eps drawn from a standard normal of the same (batch, dim) shape.
        mean, log_var = inputs
        shape = tf.shape(mean)
        eps = K.random_normal(shape=(shape[0], shape[1]))
        return mean + tf.exp(0.5 * log_var) * eps
# --- Encoder: 28x28x1 image -> 2-D latent sample z ---
img = Input((28,28,1), name='img')
x = Conv2D(32, (3,3), padding='same', activation='relu')(img)
x = MaxPooling2D()(x)
x = Conv2D(64, (3,3), padding='same', activation='relu')(x)
x = MaxPooling2D()(x)
x = Flatten()(x)
x = Dense(16, activation='relu')(x)
# latent mean; fed to Sampling as z_mean
mu = Dense(encoded_dim, name='mu')(x)
# latent log-variance; fed to Sampling as z_log_var
sigma = Dense(encoded_dim, name='sigma')(x)
z = Sampling()([mu,sigma])
# print(mu)
# --- Decoder: latent vector -> reconstructed 28x28x1 image ---
xx = Input((encoded_dim,))
x = Dense(7*7*64, activation='relu')(xx)
x = Reshape((7,7,64))(x)
x = Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
out = Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
# NOTE: encoder exposes only z as an output; mu/sigma stay internal to the
# graph, which is why the loss below has to close over them as KerasTensors.
encoder = Model(img,z, name='encoder')
decoder = Model(xx,out,name='decoder')
autoencoder = Model(img,decoder(encoder(img)),name='autoencoder')
以及损失函数:
# Broken loss from the question: mixes an eager/graph Tensor with symbolic
# KerasTensors, producing the TypeError quoted above.
def vae_loss(x, y):
# Reconstruction term: plain MSE between target x and prediction y (a Tensor).
loss = tf.reduce_mean(K.square(x-y))
# KL term closes over the module-level mu/sigma, which are symbolic
# KerasTensors from the functional graph -- this is the source of the error.
kl_loss = -0.5 * tf.reduce_mean(1 + sigma - tf.square(mu) - tf.exp(sigma))
print(type(loss))
print(type(kl_loss))
return loss + kl_loss
# Compiling with the closure-based loss above leads to the TypeError at fit time.
autoencoder.compile(optimizer='adam',
loss = vae_loss)
autoencoder.fit(train,train,
epochs=1,
batch_size=60,
shuffle=True,
verbose = 2)
loss 和 kl_loss 的类型分别为:
class 'tensorflow.python.framework.ops.Tensor'
class 'tensorflow.python.keras.engine.keras_tensor.KerasTensor'
你需要将 mu 和 sigma 传递给你的损失函数。vae_loss 现在接受 4 个输入:
def vae_loss(x, y, mu, sigma):
    """Total VAE loss: mean-squared reconstruction error plus KL divergence.

    x, y: input batch and its reconstruction.
    mu, sigma: latent mean and log-variance tensors from the encoder.
    """
    reconstruction = tf.reduce_mean(tf.square(x - y))
    divergence = -0.5 * tf.reduce_mean(1 + sigma - tf.square(mu) - tf.exp(sigma))
    return reconstruction + divergence
您可以通过 autoencoder.add_loss 在模型中使用它。同样重要的是,encoder 不仅要返回 z,还要返回 mu 和 sigma。
# NOTE(review): this unpacking requires the encoder to be redefined as
# Model(img, [z, mu, sigma]) so it yields all three outputs -- the encoder
# defined earlier returns only z.
z, mu, sigma = encoder(img)
out = decoder(z)
autoencoder = Model(img, out, name='autoencoder')
autoencoder.add_loss(vae_loss(img, out, mu, sigma)) # <======= add_loss
# loss=None: the entire training loss now comes from add_loss.
autoencoder.compile(loss=None, optimizer='adam')
这里是可运行的 notebook:https://colab.research.google.com/drive/1r5lMZ2Dc_Lq4KJDqiirXla1qfDVmdwxW?usp=sharing
我正在尝试创建变分自动编码器,这意味着我需要自定义损失函数。问题是在损失函数内部我有两种不同的损失——MSE 和 KL 散度。MSE 是 Tensor,而 KL 散度是 KerasTensor(因为 sigma 和 mu 是从编码器中输出的)。我收到这样的错误:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
这是我的架构:
import tensorflow.keras as keras
from keras.layers import Input, Dense, Flatten, Reshape
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Conv2DTranspose
from keras.models import Model
import tensorflow as tf
import keras.backend as K
encoded_dim = 2  # dimensionality of the latent space
# Sampling layer: reparameterization trick for the VAE latent code.
class Sampling(keras.layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
# z = z_mean + exp(0.5 * z_log_var) * eps, with eps ~ N(0, I)
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
# --- Encoder: 28x28x1 image -> 2-D latent sample z ---
img = Input((28,28,1), name='img')
x = Conv2D(32, (3,3), padding='same', activation='relu')(img)
x = MaxPooling2D()(x)
x = Conv2D(64, (3,3), padding='same', activation='relu')(x)
x = MaxPooling2D()(x)
x = Flatten()(x)
x = Dense(16, activation='relu')(x)
# latent mean; fed to Sampling as z_mean
mu = Dense(encoded_dim, name='mu')(x)
# latent log-variance; fed to Sampling as z_log_var
sigma = Dense(encoded_dim, name='sigma')(x)
z = Sampling()([mu,sigma])
# print(mu)
# --- Decoder: latent vector -> reconstructed 28x28x1 image ---
xx = Input((encoded_dim,))
x = Dense(7*7*64, activation='relu')(xx)
x = Reshape((7,7,64))(x)
x = Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
out = Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
# NOTE: encoder exposes only z as an output; mu/sigma stay internal to the
# graph, which is why the loss below has to close over them as KerasTensors.
encoder = Model(img,z, name='encoder')
decoder = Model(xx,out,name='decoder')
autoencoder = Model(img,decoder(encoder(img)),name='autoencoder')
以及损失函数:
# Broken loss from the question: mixes an eager/graph Tensor with symbolic
# KerasTensors, producing the TypeError quoted above.
def vae_loss(x, y):
# Reconstruction term: plain MSE between target x and prediction y (a Tensor).
loss = tf.reduce_mean(K.square(x-y))
# KL term closes over the module-level mu/sigma, which are symbolic
# KerasTensors from the functional graph -- this is the source of the error.
kl_loss = -0.5 * tf.reduce_mean(1 + sigma - tf.square(mu) - tf.exp(sigma))
print(type(loss))
print(type(kl_loss))
return loss + kl_loss
# Compiling with the closure-based loss above leads to the TypeError at fit time.
autoencoder.compile(optimizer='adam',
loss = vae_loss)
autoencoder.fit(train,train,
epochs=1,
batch_size=60,
shuffle=True,
verbose = 2)
loss 和 kl_loss 的类型分别为:
class 'tensorflow.python.framework.ops.Tensor'
class 'tensorflow.python.keras.engine.keras_tensor.KerasTensor'
你需要将 mu 和 sigma 传递给你的损失函数。vae_loss 现在接受 4 个输入:
def vae_loss(x, y, mu, sigma):
    """Total VAE loss: mean-squared reconstruction error plus KL divergence.

    x, y: input batch and its reconstruction.
    mu, sigma: latent mean and log-variance tensors from the encoder.
    """
    reconstruction = tf.reduce_mean(tf.square(x - y))
    divergence = -0.5 * tf.reduce_mean(1 + sigma - tf.square(mu) - tf.exp(sigma))
    return reconstruction + divergence
您可以通过 autoencoder.add_loss 在模型中使用它。同样重要的是,encoder 不仅要返回 z,还要返回 mu 和 sigma。
# NOTE(review): this unpacking requires the encoder to be redefined as
# Model(img, [z, mu, sigma]) so it yields all three outputs -- the encoder
# defined earlier returns only z.
z, mu, sigma = encoder(img)
out = decoder(z)
autoencoder = Model(img, out, name='autoencoder')
autoencoder.add_loss(vae_loss(img, out, mu, sigma)) # <======= add_loss
# loss=None: the entire training loss now comes from add_loss.
autoencoder.compile(loss=None, optimizer='adam')
这里是可运行的 notebook:https://colab.research.google.com/drive/1r5lMZ2Dc_Lq4KJDqiirXla1qfDVmdwxW?usp=sharing