Keras: getting a model to output a specific size by using resize
In the generator model of my GAN I am trying to produce an image of a specific size. My target size is 28x280x3. So far the generator output I am creating is 28x28x3, so I am trying to use UpSampling2D to grow the output. After three UpSampling2D layers I can get the model to output 28x224x3, but my target is 28x280x3. How can I close the remaining gap in that dimension? I noticed there is a layer for resizing; how would it apply in my case? My code is shown below:
def build_generator_face(latent_dim, channels, face_sequence):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_shape=(None, latent_dim)))
    model.add(Reshape((7, 7, 128)))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))

    if face_sequence == False:
        #model.add(UpSampling2D(size=(2, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        #model.add(UpSampling2D(size=(2, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
    else:
        model.add(UpSampling2D(size=(1, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D(size=(1, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D(size=(1, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))

    pdb.set_trace()
    model.add(Reshape((-1, 3), input_shape=(28, 224, 3)))
    model.add(Lambda(lambda x: x[:7840]))  # throw away some, so that #data = 224^2
    model.add(Reshape(28, 280, 3))  # this line gives me an error but I am not sure if it is necessary or not
    model.add(Conv2D(channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))

    model.summary()

    noise = Input(shape=(latent_dim,))
    img = model(noise)
    mdl = Model(noise, output=img)

    return mdl
When face_sequence is False the model produces a 28x28x3 output. I would like it to produce a 28x280x3 output when the boolean is True. How can I do this?
You keep only the first 7840 entries and then try to reshape to the desired shape. For that you would need 23520 elements (28*280*3), but you only have 18816 (28*224*3).
The code below resizes earlier in the process and uses one extra UpSampling2D -> Conv2D block to produce the desired shape.
from keras.models import Sequential
from keras.layers import (Dense, Reshape, UpSampling2D, Conv2D,
                          BatchNormalization, Activation, Lambda)

def build_generator_face(latent_dim, channels, face_sequence):
    model = Sequential()
    model.add(Dense(128 * 7 * 7, activation="relu", input_shape=(None, latent_dim)))
    model.add(Reshape((7, 7, 128)))                        # (7, 7, 128)
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))                          # (14, 14, 128)
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=4, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))                          # (28, 28, 64)

    if face_sequence == False:
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))                      # (28, 28, 64)
    else:
        model.add(UpSampling2D(size=(1, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))                      # (28, 56, 64)
        # crop the width from 56 down to 35 and continue upsampling: 35 -> 70 -> 140 -> 280
        model.add(Lambda(lambda x: x[:, :, :35, :]))       # (28, 35, 64)
        model.add(UpSampling2D(size=(1, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))                      # (28, 70, 64)
        model.add(UpSampling2D(size=(1, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))                      # (28, 140, 64)
        model.add(UpSampling2D(size=(1, 2)))
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))                      # (28, 280, 64)

    model.add(Conv2D(channels, kernel_size=4, padding="same"))
    model.add(Activation("tanh"))                          # (28, 280, channels) when face_sequence is True

    model.summary()

    return model
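As a quick sanity check (a sketch, not from the original post; latent_dim=100 and channels=3 are assumed example values), you can build the generator in both modes and print its output shape:

gen_seq = build_generator_face(latent_dim=100, channels=3, face_sequence=True)
print(gen_seq.output_shape)     # expected: (None, 28, 280, 3)
gen_single = build_generator_face(latent_dim=100, channels=3, face_sequence=False)
print(gen_single.output_shape)  # expected: (None, 28, 28, 3)

As for the resizing layer the question mentions, an alternative (again only a sketch, assuming TensorFlow 2.x; not part of the original answer) is to stretch the 28x224 feature map to 28x280 with tf.image.resize, for example wrapped in a Lambda layer just before the final Conv2D:

import tensorflow as tf

x = tf.zeros((1, 28, 224, 64))      # dummy batch with the 28x224 feature map from the question
y = tf.image.resize(x, (28, 280))   # bilinear interpolation by default
print(y.shape)                      # (1, 28, 280, 64)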