名称 "Generator" 在模型中使用了 2 次。所有图层名称应该是唯一的

The name "Generator" is used 2 times in the model. All layer names should be unique

I am trying to build a CycleGAN for unpaired image-to-image translation based on this reference. When I try to compile the combined model, I get the error below. I don't understand why this happens, since I am using the same configuration as the reference. My code is attached; if anyone can spot the problem, please review it. Thanks in advance, and sorry for my poor English.

from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization

img_rows, img_columns, channels = 256, 256, 1

img_shape = (img_rows, img_columns, channels)

def Generator():
  inputs = Input(img_shape)
  conv1 = Conv2D(64, (4, 4), strides=2, padding='same')(inputs)   # 128
  conv1 = Activation(LeakyReLU(alpha=0.2))(conv1)
  conv1 = InstanceNormalization()(conv1)


  conv2 = Conv2D(128, (4, 4), strides=2, padding='same')(conv1)   # 64
  conv2 = Activation(LeakyReLU(alpha=0.2))(conv2)
  conv2 = InstanceNormalization()(conv2)

  conv3 = Conv2D(256, (4, 4), strides=2, padding='same')(conv2)   # 32
  conv3 = Activation(LeakyReLU(alpha=0.2))(conv3)
  conv3 = InstanceNormalization()(conv3)


  Deconv3 = concatenate([Conv2DTranspose(256, (4, 4), strides=2, padding='same')(conv3), conv2], axis=-1)  # 64
  Deconv3 = InstanceNormalization()(Deconv3)
  Deconv3 = Dropout(0.2)(Deconv3)
  Deconv3 = Activation('relu')(Deconv3)

  Deconv2 = concatenate([Conv2DTranspose(128, (4, 4), strides=2, padding='same')(Deconv3), conv1], axis=-1)   # 128
  Deconv2 = InstanceNormalization()(Deconv2)
  Deconv2 = Dropout(0.2)(Deconv2)
  Deconv2 = Activation('relu')(Deconv2)

  Deconv1 = UpSampling2D(size=(2, 2))(Deconv2)   # 256
  Deconv1 = Conv2D(1, (4, 4), strides=1, padding='same')(Deconv1)
  outputs = Activation('tanh')(Deconv1)

  return Model(inputs=inputs, outputs=outputs, name='Generator')

def Discriminator():
  inputs = Input(img_shape)
  conv1 = Conv2D(64, (4, 4), strides=2, padding='same')(inputs)   # 128
  conv1 = Activation(LeakyReLU(alpha=0.2))(conv1)
  conv1 = InstanceNormalization()(conv1)


  conv2 = Conv2D(128, (4, 4), strides=2, padding='same')(conv1)   # 64
  conv2 = Activation(LeakyReLU(alpha=0.2))(conv2)
  conv2 = InstanceNormalization()(conv2)

  conv3 = Conv2D(256, (4, 4), strides=2, padding='same')(conv2)   # 32
  conv3 = Activation(LeakyReLU(alpha=0.2))(conv3)
  conv3 = InstanceNormalization()(conv3)

  conv4 = Conv2D(256, (4, 4), strides=2, padding='same')(conv3)   # 16
  conv4 = Activation(LeakyReLU(alpha=0.2))(conv4)
  conv4 = InstanceNormalization()(conv4)

  conv5 = Conv2D(512, (4, 4), strides=2, padding='same')(conv4)   # 8
  conv5 = Activation(LeakyReLU(alpha=0.2))(conv5)
  conv5 = InstanceNormalization()(conv5)

  conv6 = Conv2D(512, (4, 4), strides=2, padding='same')(conv5)   # 4
  conv6 = Activation(LeakyReLU(alpha=0.2))(conv6)
  conv6 = InstanceNormalization()(conv6)

  outputs = Conv2D(1, (4, 4), strides=1, padding='same')(conv6)   # 4

  return Model(inputs=inputs, outputs=outputs, name='Discriminator')

# Calculate output shape of D (PatchGAN)
patch = int(img_rows / 2**6)
disc_patch = (patch, patch, 1)

# Loss weights
lambda_cycle = 10.0                    # Cycle-consistency loss
lambda_id = 0.1 * lambda_cycle         # Identity loss
optimizer = Adam(0.0002, 0.5)

# Build and compile the discriminators
d_A = Discriminator()
d_B = Discriminator()
d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

# Build the generators
g_AB = Generator()
g_BA = Generator()

# Input images from both domains
img_A = Input(shape=img_shape)
img_B = Input(shape=img_shape)

# Translate images to the other domain
fake_B = g_AB(img_A)
fake_A = g_BA(img_B)

# Translate images back to original domain
reconstr_A = g_BA(fake_B)
reconstr_B = g_AB(fake_A)

# Identity mapping of images
img_A_id = g_BA(img_A)
img_B_id = g_AB(img_B)

# For the combined model we will only train the generators
d_A.trainable = False
d_B.trainable = False

# Discriminators determines validity of translated images
valid_A = d_A(fake_A)
valid_B = d_B(fake_B)

# Combined model trains generators to fool discriminators
combined = Model(inputs=[img_A, img_B],
                 outputs=[valid_A, valid_B, reconstr_A, reconstr_B, img_A_id, img_B_id])
combined.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
                 loss_weights=[1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id],
                 optimizer=optimizer)

The error is:

The name "Generator" is used 2 times in the model. All layer names should be unique.

These lines in the Generator and Discriminator functions are the cause of the problem: each function is called twice, so two models end up with the same name. Generate a unique name on each call, or simply omit the name argument.

return Model(inputs=inputs, outputs=outputs, name='Generator')

return Model(inputs=inputs, outputs=outputs, name='Discriminator')

One possible solution:

return Model(inputs=inputs, outputs=outputs)
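
Alternatively, if you want to keep descriptive model names, generate a unique name on each call. A minimal sketch, assuming you are free to change the builder signatures (the bodies stay exactly as in your code; the name values 'g_AB', 'g_BA', 'd_A', 'd_B' are just illustrative choices matching your variable names):

def Generator(name=None):
    ...  # encoder/decoder layers unchanged from the question
    return Model(inputs=inputs, outputs=outputs, name=name)

def Discriminator(name=None):
    ...  # PatchGAN layers unchanged from the question
    return Model(inputs=inputs, outputs=outputs, name=name)

# Each instance now gets its own unique name
g_AB = Generator(name='g_AB')
g_BA = Generator(name='g_BA')
d_A = Discriminator(name='d_A')
d_B = Discriminator(name='d_B')

Either way, once every nested model has a unique name (or no explicit name at all), Keras can build the combined model without raising the duplicate-name error.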