Changed Model input_shape but got: ValueError: Input 0 of layer dense_44 is incompatible with the layer
I am new to Python and DL.
Please help me fix this error.
This class was originally written for the MNIST dataset (28 x 28), and I am trying to adapt it to my own work, where the images are (224 x 224). I changed the input image shape, but I still get an incompatible-shape error, and the model still uses the old MNIST shapes.
Note that I am using: X_train=(676, 224, 224) / y_train=(676,) / X_test=(170, 224, 224) / y_test=(170,)
The code:
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, concatenate
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np


class INFOGAN():
    def __init__(self):
        self.img_rows = 224
        self.img_cols = 224
        self.channels = 1
        self.num_classes = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 72

        optimizer = Adam(0.0002, 0.5)
        losses = ['binary_crossentropy', self.mutual_info_loss]

        # Build the discriminator and recognition network
        self.discriminator, self.auxilliary = self.build_disk_and_q_net()

        self.discriminator.compile(loss=['binary_crossentropy'],
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build and compile the recognition network Q
        self.auxilliary.compile(loss=[self.mutual_info_loss],
                                optimizer=optimizer,
                                metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        gen_input = Input(shape=(self.latent_dim,))
        img = self.generator(gen_input)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated image as input and determines validity
        valid = self.discriminator(img)

        # The recognition network produces the label
        target_label = self.auxilliary(img)

        # The combined model (stacked generator and discriminator)
        self.combined = Model(gen_input, [valid, target_label])
        self.combined.compile(loss=losses,
                              optimizer=optimizer)

    def build_generator(self):
        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        model.summary()

        return Model(gen_input, img)

    def build_disk_and_q_net(self):
        img = Input(shape=self.img_shape)

        # Shared layers between discriminator and recognition network
        model = Sequential()
        model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Flatten())

        img_embedding = model(img)

        # Discriminator
        validity = Dense(1, activation='sigmoid')(img_embedding)

        # Recognition
        q_net = Dense(128, activation='relu')(img_embedding)
        label = Dense(self.num_classes, activation='softmax')(q_net)

        # Return discriminator and recognition network
        return Model(img, validity), Model(img, label)

    def mutual_info_loss(self, c, c_given_x):
        """The mutual information metric we aim to minimize"""
        eps = 1e-8
        conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))
        entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))

        return conditional_entropy + entropy

    def sample_generator_input(self, batch_size):
        # Generator inputs
        sampled_noise = np.random.normal(0, 1, (batch_size, 62))
        sampled_labels = np.random.randint(0, self.num_classes, batch_size).reshape(-1, 1)
        sampled_labels = to_categorical(sampled_labels, num_classes=self.num_classes)

        return sampled_noise, sampled_labels

    def train(self, epochs, batch_size=128, sample_interval=50):
        # X_train and y_train are assumed to be loaded beforehand
        # (shapes (676, 224, 224) and (676,), as described above)

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)
        y_train = y_train.reshape(-1, 1)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Sample noise and categorical labels
            sampled_noise, sampled_labels = self.sample_generator_input(batch_size)
            gen_input = np.concatenate((sampled_noise, sampled_labels), axis=1)

            # Generate a half batch of new images
            gen_imgs = self.generator.predict(gen_input)

            # Train on real and generated data
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)

            # Avg. loss
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator and Q-network
            # ---------------------

            g_loss = self.combined.train_on_batch(gen_input, [valid, sampled_labels])

            # Plot the progress
            print("%d [D loss: %.2f, acc.: %.2f%%] [Q loss: %.2f] [G loss: %.2f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[1], g_loss[2]))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)

    def sample_images(self, epoch):
        r, c = 10, 10

        fig, axs = plt.subplots(r, c)

        for i in range(c):
            sampled_noise, _ = self.sample_generator_input(c)
            label = to_categorical(np.full(fill_value=i, shape=(r, 1)), num_classes=self.num_classes)

            gen_input = np.concatenate((sampled_noise, label), axis=1)
            gen_imgs = self.generator.predict(gen_input)
            gen_imgs = 0.5 * gen_imgs + 0.5

            for j in range(r):
                axs[j, i].imshow(gen_imgs[j, :, :, 0], cmap='gray')
                axs[j, i].axis('off')

        fig.savefig("images/%d.png" % epoch)
        plt.close()

    def save_model(self):

        def save(model, model_name):
            model_path = "saved_model/%s.json" % model_name
            weights_path = "saved_model/%s_weights.hdf5" % model_name
            options = {"file_arch": model_path,
                       "file_weight": weights_path}
            json_string = model.to_json()
            open(options['file_arch'], 'w').write(json_string)
            model.save_weights(options['file_weight'])

        save(self.generator, "generator")
        save(self.discriminator, "discriminator")


if __name__ == '__main__':
    infogan = INFOGAN()
    infogan.train(epochs=50000, batch_size=128, sample_interval=50)
The error:
Model: "sequential_23"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_47 (Dense) (None, 6272) 457856
_________________________________________________________________
reshape_11 (Reshape) (None, 7, 7, 128) 0
_________________________________________________________________
batch_normalization_87 (Batc (None, 7, 7, 128) 512
_________________________________________________________________
up_sampling2d_40 (UpSampling (None, 14, 14, 128) 0
_________________________________________________________________
conv2d_99 (Conv2D) (None, 14, 14, 128) 147584
_________________________________________________________________
activation_42 (Activation) (None, 14, 14, 128) 0
_________________________________________________________________
batch_normalization_88 (Batc (None, 14, 14, 128) 512
_________________________________________________________________
up_sampling2d_41 (UpSampling (None, 28, 28, 128) 0
_________________________________________________________________
conv2d_100 (Conv2D) (None, 28, 28, 64) 73792
_________________________________________________________________
activation_43 (Activation) (None, 28, 28, 64) 0
_________________________________________________________________
batch_normalization_89 (Batc (None, 28, 28, 64) 256
_________________________________________________________________
conv2d_101 (Conv2D) (None, 28, 28, 1) 577
_________________________________________________________________
activation_44 (Activation) (None, 28, 28, 1) 0
=================================================================
Total params: 681,089
Trainable params: 680,449
Non-trainable params: 640
_________________________________________________________________
WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 1) for input Tensor("input_22:0", shape=(None, 224, 224, 1), dtype=float32), but it was called on an input with incompatible shape (None, 28, 28, 1).
WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 1) for input Tensor("conv2d_95_input:0", shape=(None, 224, 224, 1), dtype=float32), but it was called on an input with incompatible shape (None, 28, 28, 1).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-45-60a1c6b0bc8b> in <module>()
225
226 if __name__ == '__main__':
--> 227 infogan = INFOGAN()
228 infogan.train(epochs=50000, batch_size=128, sample_interval=50)
7 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/input_spec.py in assert_input_compatibility(input_spec, inputs, layer_name)
214 ' incompatible with the layer: expected axis ' + str(axis) +
215 ' of input shape to have value ' + str(value) +
--> 216 ' but received input with shape ' + str(shape))
217 # Check shape.
218 if spec.shape is not None:
ValueError: Input 0 of layer dense_44 is incompatible with the layer: expected axis -1 of input shape to have value 115200 but received input with shape [None, 2048]
You forgot to change the architecture of the generator. The generator's output shape and the discriminator's input shape have to match; that is what causes the error.
To fix it, you need to fix the architecture. The generator currently produces images of shape (28, 28, 1), but you want (224, 224, 1). The shape an architecture produces follows from the architecture itself and its parameters.
So I added two upsampling layers and changed the sizes of the other layers to match the discriminator's output.
I also removed the ZeroPadding2D layer from the discriminator, because it made the shapes odd (15, 15, ...), so it was impossible to match the same sizes in the generator.
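To see the arithmetic behind this, here is a quick sketch (illustration only, not part of the fix): every stride-2 "same" convolution maps a side length n to ceil(n / 2), and every UpSampling2D doubles it.

import math

# Discriminator side: four stride-2 "same" convolutions applied to 224
n = 224
for _ in range(4):
    n = math.ceil(n / 2)
    print(n)  # 112, 56, 28, 14

# Generator side: start from 14 x 14 and double four times,
# 14 -> 28 -> 56 -> 112 -> 224, hence the four UpSampling2D layers below.
# With ZeroPadding2D kept, the discriminator would instead see
# 56 -> 57 -> ceil(57/2) = 29 -> ceil(29/2) = 15,
# and an odd size like 15 can never be reached by doubling an integer size.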
Here is the fixed code:
    def build_generator(self):
        model = Sequential()

        model.add(Dense(512 * 14 * 14, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((14, 14, 512)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(256, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        model.summary()

        return Model(gen_input, img)

    def build_disk_and_q_net(self):
        img = Input(shape=self.img_shape)

        # Shared layers between discriminator and recognition network
        model = Sequential()
        model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        #model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Flatten())

        model.summary()

        img_embedding = model(img)

        # Discriminator
        validity = Dense(1, activation='sigmoid')(img_embedding)

        # Recognition
        q_net = Dense(128, activation='relu')(img_embedding)
        label = Dense(self.num_classes, activation='softmax')(q_net)

        # Return discriminator and recognition network
        return Model(img, validity), Model(img, label)
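As a quick sanity check (a sketch, assuming the class is rebuilt with the two methods above), you can confirm that the two networks now agree on the image shape:

gan = INFOGAN()
print(gan.generator.output_shape)     # (None, 224, 224, 1)
print(gan.discriminator.input_shape)  # (None, 224, 224, 1)
assert gan.generator.output_shape[1:] == gan.discriminator.input_shape[1:]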
And the summaries:
Model: "sequential_14"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_53 (Conv2D) (None, 112, 112, 64) 640
_________________________________________________________________
leaky_re_lu_28 (LeakyReLU) (None, 112, 112, 64) 0
_________________________________________________________________
dropout_28 (Dropout) (None, 112, 112, 64) 0
_________________________________________________________________
conv2d_54 (Conv2D) (None, 56, 56, 128) 73856
_________________________________________________________________
leaky_re_lu_29 (LeakyReLU) (None, 56, 56, 128) 0
_________________________________________________________________
dropout_29 (Dropout) (None, 56, 56, 128) 0
_________________________________________________________________
batch_normalization_46 (Batc (None, 56, 56, 128) 512
_________________________________________________________________
conv2d_55 (Conv2D) (None, 28, 28, 256) 295168
_________________________________________________________________
leaky_re_lu_30 (LeakyReLU) (None, 28, 28, 256) 0
_________________________________________________________________
dropout_30 (Dropout) (None, 28, 28, 256) 0
_________________________________________________________________
batch_normalization_47 (Batc (None, 28, 28, 256) 1024
_________________________________________________________________
conv2d_56 (Conv2D) (None, 14, 14, 512) 1180160
_________________________________________________________________
leaky_re_lu_31 (LeakyReLU) (None, 14, 14, 512) 0
_________________________________________________________________
dropout_31 (Dropout) (None, 14, 14, 512) 0
_________________________________________________________________
batch_normalization_48 (Batc (None, 14, 14, 512) 2048
_________________________________________________________________
flatten_7 (Flatten) (None, 100352) 0
=================================================================
Total params: 1,553,408
Trainable params: 1,551,616
Non-trainable params: 1,792
_________________________________________________________________
Model: "sequential_15"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_31 (Dense) (None, 100352) 7325696
_________________________________________________________________
reshape_7 (Reshape) (None, 14, 14, 512) 0
_________________________________________________________________
batch_normalization_49 (Batc (None, 14, 14, 512) 2048
_________________________________________________________________
up_sampling2d_18 (UpSampling (None, 28, 28, 512) 0
_________________________________________________________________
conv2d_57 (Conv2D) (None, 28, 28, 256) 1179904
_________________________________________________________________
activation_25 (Activation) (None, 28, 28, 256) 0
_________________________________________________________________
batch_normalization_50 (Batc (None, 28, 28, 256) 1024
_________________________________________________________________
up_sampling2d_19 (UpSampling (None, 56, 56, 256) 0
_________________________________________________________________
conv2d_58 (Conv2D) (None, 56, 56, 128) 295040
_________________________________________________________________
activation_26 (Activation) (None, 56, 56, 128) 0
_________________________________________________________________
batch_normalization_51 (Batc (None, 56, 56, 128) 512
_________________________________________________________________
up_sampling2d_20 (UpSampling (None, 112, 112, 128) 0
_________________________________________________________________
conv2d_59 (Conv2D) (None, 112, 112, 64) 73792
_________________________________________________________________
activation_27 (Activation) (None, 112, 112, 64) 0
_________________________________________________________________
batch_normalization_52 (Batc (None, 112, 112, 64) 256
_________________________________________________________________
up_sampling2d_21 (UpSampling (None, 224, 224, 64) 0
_________________________________________________________________
conv2d_60 (Conv2D) (None, 224, 224, 1) 577
_________________________________________________________________
activation_28 (Activation) (None, 224, 224, 1) 0
=================================================================
Total params: 8,878,849
Trainable params: 8,876,929
Non-trainable params: 1,920
_________________________________________________________________
EDIT:
Since you reduced the number of classes from 10 to 3, you also have to change the latent_dim parameter to 65. Note that the method sample_generator_input produces noise of size 62 and a label of size num_classes, which are then concatenated (so the size becomes 62 + 3 = 65).
The generator is defined to accept inputs of size self.latent_dim, so it is cleaner to compute latent_dim in the constructor from the number of classes: self.latent_dim = 62 + self.num_classes.
Furthermore, the method sample_images contains hard-coded magic numbers. How is one supposed to know what they mean? I am referring to this line: r, c = 10, 10.
I assume it stands for the number of classes. Since you changed it from 10 to 3 in your example, I suggest you change the line to:
r, c = self.num_classes, self.num_classes
Overall, the code is badly written: if you change one constant, everything breaks. Be careful when copying full code, and make sure you understand every part of it before copying.
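To make the size bookkeeping concrete, here is a minimal sketch of what sample_generator_input produces and why latent_dim must equal 62 + num_classes (the batch size of 4 is arbitrary):

import numpy as np
from keras.utils import to_categorical

num_classes = 3
batch_size = 4

sampled_noise = np.random.normal(0, 1, (batch_size, 62))        # shape (4, 62)
sampled_labels = to_categorical(
    np.random.randint(0, num_classes, batch_size).reshape(-1, 1),
    num_classes=num_classes)                                    # shape (4, 3)

gen_input = np.concatenate((sampled_noise, sampled_labels), axis=1)
print(gen_input.shape)  # (4, 65) -- must equal (batch_size, latent_dim)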
The full code:
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, concatenate
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np


class INFOGAN():
    def __init__(self):
        self.img_rows = 224
        self.img_cols = 224
        self.channels = 1
        self.num_classes = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.latent_dim = 62 + self.num_classes

        optimizer = Adam(0.0002, 0.5)
        losses = ['binary_crossentropy', self.mutual_info_loss]

        # Build the discriminator and recognition network
        self.discriminator, self.auxilliary = self.build_disk_and_q_net()

        self.discriminator.compile(loss=['binary_crossentropy'],
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build and compile the recognition network Q
        self.auxilliary.compile(loss=[self.mutual_info_loss],
                                optimizer=optimizer,
                                metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # The generator takes noise and the target label as input
        # and generates the corresponding digit of that label
        gen_input = Input(shape=(self.latent_dim,))
        img = self.generator(gen_input)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated image as input and determines validity
        valid = self.discriminator(img)

        # The recognition network produces the label
        target_label = self.auxilliary(img)

        # The combined model (stacked generator and discriminator)
        self.combined = Model(gen_input, [valid, target_label])
        self.combined.compile(loss=losses,
                              optimizer=optimizer)

    def build_generator(self):
        model = Sequential()

        model.add(Dense(512 * 14 * 14, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((14, 14, 512)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(256, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(self.channels, kernel_size=3, padding='same'))
        model.add(Activation("tanh"))

        gen_input = Input(shape=(self.latent_dim,))
        img = model(gen_input)

        model.summary()

        return Model(gen_input, img)

    def build_disk_and_q_net(self):
        img = Input(shape=self.img_shape)

        # Shared layers between discriminator and recognition network
        model = Sequential()
        model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        #model.add(ZeroPadding2D(padding=((0,1),(0,1))))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(512, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Flatten())

        model.summary()

        img_embedding = model(img)

        # Discriminator
        validity = Dense(1, activation='sigmoid')(img_embedding)

        # Recognition
        q_net = Dense(128, activation='relu')(img_embedding)
        label = Dense(self.num_classes, activation='softmax')(q_net)
        print(label.shape)

        # Return discriminator and recognition network
        return Model(img, validity), Model(img, label)

    def mutual_info_loss(self, c, c_given_x):
        """The mutual information metric we aim to minimize"""
        eps = 1e-8
        conditional_entropy = K.mean(- K.sum(K.log(c_given_x + eps) * c, axis=1))
        entropy = K.mean(- K.sum(K.log(c + eps) * c, axis=1))

        return conditional_entropy + entropy

    def sample_generator_input(self, batch_size):
        # Generator inputs
        sampled_noise = np.random.normal(0, 1, (batch_size, 62))
        sampled_labels = np.random.randint(0, self.num_classes, batch_size).reshape(-1, 1)
        print(sampled_labels)
        sampled_labels = to_categorical(sampled_labels, num_classes=self.num_classes)

        return sampled_noise, sampled_labels

    def train(self, epochs, batch_size=128, sample_interval=50):
        # Dummy data standing in for your dataset, so the example runs end to end
        X_train = np.ones([batch_size, 224, 224])
        y_train = np.zeros([batch_size,])

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)
        y_train = y_train.reshape(-1, 1)

        # Adversarial ground truths
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Sample noise and categorical labels
            sampled_noise, sampled_labels = self.sample_generator_input(batch_size)
            gen_input = np.concatenate((sampled_noise, sampled_labels), axis=1)
            print(sampled_labels.shape, batch_size)

            # Generate a half batch of new images
            gen_imgs = self.generator.predict(gen_input)

            # Train on real and generated data
            d_loss_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)

            # Avg. loss
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator and Q-network
            # ---------------------

            g_loss = self.combined.train_on_batch(gen_input, [valid, sampled_labels])

            # Plot the progress
            print("%d [D loss: %.2f, acc.: %.2f%%] [Q loss: %.2f] [G loss: %.2f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[1], g_loss[2]))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                self.sample_images(epoch)

    def sample_images(self, epoch):
        r, c = self.num_classes, self.num_classes

        fig, axs = plt.subplots(r, c)

        for i in range(c):
            sampled_noise, _ = self.sample_generator_input(c)
            label = to_categorical(np.full(fill_value=i, shape=(r, 1)), num_classes=self.num_classes)

            gen_input = np.concatenate((sampled_noise, label), axis=1)
            gen_imgs = self.generator.predict(gen_input)
            gen_imgs = 0.5 * gen_imgs + 0.5

            for j in range(r):
                axs[j, i].imshow(gen_imgs[j, :, :, 0], cmap='gray')
                axs[j, i].axis('off')

        fig.savefig("images/%d.png" % epoch)
        plt.close()

    def save_model(self):

        def save(model, model_name):
            model_path = "saved_model/%s.json" % model_name
            weights_path = "saved_model/%s_weights.hdf5" % model_name
            options = {"file_arch": model_path,
                       "file_weight": weights_path}
            json_string = model.to_json()
            open(options['file_arch'], 'w').write(json_string)
            model.save_weights(options['file_weight'])

        save(self.generator, "generator")
        save(self.discriminator, "discriminator")


if __name__ == '__main__':
    infogan = INFOGAN()
    infogan.train(epochs=50000, batch_size=8, sample_interval=50)