TensorFlow Image Augmentation: datagen - ValueError

Using TensorFlow 2.6, Python 3.9, and the CIFAR-10 dataset, I am trying to train a simple convolutional neural network model, defined as follows:

import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

def conv6_cnn():
    """
    Define a neural network model following the Conv-6 architecture
    for the CIFAR-10 dataset (the model is later used for pruning
    experiments).
    
    Conv-6 architecture-
    64, 64, pool   -- convolutional layers
    128, 128, pool -- convolutional layers
    256, 256, pool -- convolutional layers
    256, 256, 10   -- fully connected layers
    
    Output: Returns the designed (uncompiled) neural network model
    """
    
    model = Sequential()
    
    model.add(
        Conv2D(
            filters = 64, kernel_size = (3, 3),
            activation='relu', kernel_initializer = tf.keras.initializers.GlorotNormal(),
            strides = (1, 1), padding = 'same',
            input_shape=(32, 32, 3)
        )    
    )
        
    model.add(
        Conv2D(
            filters = 64, kernel_size = (3, 3),
            activation='relu', kernel_initializer = tf.keras.initializers.GlorotNormal(),
            strides = (1, 1), padding = 'same'
        )
    )
    
    model.add(
        MaxPooling2D(
            pool_size = (2, 2),
            strides = (2, 2)
        )
    )
    
    model.add(
        Conv2D(
            filters = 128, kernel_size = (3, 3),
            activation='relu', kernel_initializer = tf.keras.initializers.GlorotNormal(),
            strides = (1, 1), padding = 'same'
        )
    )

    model.add(
        Conv2D(
            filters = 128, kernel_size = (3, 3),
            activation='relu', kernel_initializer = tf.keras.initializers.GlorotNormal(),
            strides = (1, 1), padding = 'same'
        )
    )

    model.add(
        MaxPooling2D(
            pool_size = (2, 2),
            strides = (2, 2)
        )
    )

    model.add(
        Conv2D(
            filters = 256, kernel_size = (3, 3),
            activation='relu', kernel_initializer = tf.keras.initializers.GlorotNormal(),
            strides = (1, 1), padding = 'same'
        )
    )

    model.add(
        Conv2D(
            filters = 256, kernel_size = (3, 3),
            activation='relu', kernel_initializer = tf.keras.initializers.GlorotNormal(),
            strides = (1, 1), padding = 'same'
        )
    )

    model.add(
        MaxPooling2D(
            pool_size = (2, 2),
            strides = (2, 2)
        )
    )
    
    model.add(Flatten())
    
    model.add(
        Dense(
            units = 256, activation = 'relu',
            kernel_initializer = tf.keras.initializers.GlorotNormal()
        )
    )
    
    model.add(
        Dense(
            units = 256, activation = 'relu',
            kernel_initializer = tf.keras.initializers.GlorotNormal()
        )
    )
    
    model.add(
        Dense(
            units = 10, activation = 'softmax'
        )
    )
    
    return model

# Initialize a Conv-6 CNN object-
model = conv6_cnn()

# Define data augmentation using ImageDataGenerator:
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Initialize and define the image data generator-
datagen = ImageDataGenerator(
    # featurewise_center=True,
    # featurewise_std_normalization=True,
    rotation_range = 90,
    width_shift_range = 0.1,
    height_shift_range = 0.1,
    horizontal_flip = True
)

# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
# Note: fit() only has an effect when featurewise_center,
# featurewise_std_normalization, or zca_whitening is enabled;
# a variant that actually needs it is sketched after this code block.
datagen.fit(X_train)

# Compile defined model-
model.compile(
    optimizer = optimizer,
    loss = loss_fn,
    metrics = ['accuracy']
)

# Define early stopping criterion-
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor = 'val_loss', min_delta = 0.001,
    patience = 4, verbose = 0,
    mode = 'auto', baseline = None,
    restore_best_weights = True
)
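
As an aside, datagen.fit(X_train) has no effect with the options above, because the featurewise settings are commented out. A minimal sketch of a variant that actually needs the fit() call (datagen_fw is a hypothetical name, not from the original post):

datagen_fw = ImageDataGenerator(
    featurewise_center = True,
    featurewise_std_normalization = True,
    rotation_range = 90,
    width_shift_range = 0.1,
    height_shift_range = 0.1,
    horizontal_flip = True
)

# fit() is required here: it computes the dataset-wide mean and std
# that the generator uses to center and scale each generated batch.
datagen_fw.fit(X_train)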

When I train this CNN model without any data augmentation using the following code, it runs without issue:

# Train model without any data augmentation-
history = model.fit(
    x = X_train, y = y_train,
    batch_size = batch_size, epochs = num_epochs,
    callbacks = [early_stopping],
    validation_data = (X_test, y_test)
)

However, when using data (image) augmentation:

# Train model on batches with real-time data augmentation-
training_history = model.fit(
    datagen.flow(
        X_train, y_train,
        batch_size = batch_size, subset = 'training'
    ),
    validation_data = (X_test, y_test),
    steps_per_epoch = len(X_train) // batch_size,
    epochs = num_epochs,
    callbacks = [early_stopping]
)

it fails with the error:

ValueError: Training and validation subsets have different number of classes after the split. If your numpy arrays are sorted by the label, you might want to shuffle them.

You just need to remove the argument subset='training', because you did not set validation_split in the ImageDataGenerator. With validation_split left at its default of 0, the validation slice of the split is empty, so the check that both subsets contain the same classes fails with the error above. Either set both parameters together (see the sketch at the end of this answer), or simply use neither:

import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    # featurewise_center=True,
    # featurewise_std_normalization=True,
    rotation_range = 90,
    width_shift_range = 0.1,
    height_shift_range = 0.1,
    horizontal_flip = True
)

datagen.fit(x_train)

# Compile defined model-
model.compile(
    optimizer = tf.keras.optimizers.Adam(),
    loss = tf.keras.losses.CategoricalCrossentropy(),
    metrics = ['accuracy']
)

# Define early stopping criterion-
early_stopping = tf.keras.callbacks.EarlyStopping(
    monitor = 'val_loss', min_delta = 0.001,
    patience = 4, verbose = 0,
    mode = 'auto', baseline = None,
    restore_best_weights = True
)
batch_size = 32

training_history = model.fit(
    datagen.flow(
        x_train, y_train,
        batch_size = batch_size
    ),
    steps_per_epoch = len(x_train) // batch_size,
    epochs = 2,
    callbacks = [early_stopping]
)
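
If you do want the train/validation split to come from the generator itself, both pieces have to be set: validation_split on the ImageDataGenerator and subset on each flow() call. A minimal sketch (the 0.2 split and the train_flow/val_flow names are illustrative, not from the original post):

datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range = 90,
    width_shift_range = 0.1,
    height_shift_range = 0.1,
    horizontal_flip = True,
    validation_split = 0.2  # hold out 20% of the arrays for validation
)

# CIFAR-10's training arrays are not sorted by label, so both subsets
# should contain all 10 classes; shuffle your data first if it is sorted.
train_flow = datagen.flow(
    x_train, y_train,
    batch_size = batch_size, subset = 'training'
)
val_flow = datagen.flow(
    x_train, y_train,
    batch_size = batch_size, subset = 'validation'
)

training_history = model.fit(
    train_flow,
    validation_data = val_flow,
    epochs = 2,
    callbacks = [early_stopping]
)

Note that this applies the same augmentation to the validation images; in practice you would typically use a second ImageDataGenerator without augmentation (but with the same validation_split) for the validation flow.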

See the docs for more information.