'Sequential' object has no attribute '_ckpt_saved_epoch' error when trying to save my model using a callback in Keras

I get this error while trying to save my MobileNet model:

Traceback (most recent call last):
  File "../src/script.py", line 150, in <module>
    callbacks=[cb_checkpointer, cb_early_stopper]
  File "/opt/conda/lib/python3.6/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/opt/conda/lib/python3.6/site-packages/keras/engine/training.py", line 1418, in fit_generator
    initial_epoch=initial_epoch)
  File "/opt/conda/lib/python3.6/site-packages/keras/engine/training_generator.py", line 264, in fit_generator
    callbacks.on_train_end()
  File "/opt/conda/lib/python3.6/site-packages/keras/callbacks.py", line 142, in on_train_end
    callback.on_train_end(logs)
  File "/opt/conda/lib/python3.6/site-packages/tensorflow/python/keras/callbacks.py", line 940, in on_train_end
    if self.model._ckpt_saved_epoch is not None:
AttributeError: 'Sequential' object has no attribute '_ckpt_saved_epoch'

I am saving with these callbacks:

filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
cb_early_stopper = EarlyStopping(monitor = 'val_loss', mode='min', verbose=1, patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = filepath, monitor = 'val_loss', save_best_only = True, mode = 'auto')

My model code:

import numpy as np
import cv2
import os

#from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras import optimizers

from keras import regularizers
from keras.regularizers import l2
from keras.layers import Dropout
from keras.applications.mobilenet import MobileNet
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint



#print(os.listdir("testset/"))

# Fixed for our classes
NUM_CLASSES = 3

# Fixed for color images
CHANNELS = 3

IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'

# Common accuracy metric for all outputs, but can use different metrics for different output
LOSS_METRICS = ['accuracy']

# EARLY_STOP_PATIENCE must be < NUM_EPOCHS
NUM_EPOCHS = 100
EARLY_STOP_PATIENCE = 50

# These steps value should be proper FACTOR of no.-of-images in train & valid folders respectively
# Training images processed in each step would be no.-of-train-images / STEPS_PER_EPOCH_TRAINING
STEPS_PER_EPOCH_TRAINING = 27
STEPS_PER_EPOCH_VALIDATION = 11

# These steps value should be proper FACTOR of no.-of-images in train & valid folders respectively
# NOTE that these BATCH* are for Keras ImageDataGenerator batching to fill epoch step input
BATCH_SIZE_TRAINING = 8
BATCH_SIZE_VALIDATION = 8

base_mobilenet_model = MobileNet(include_top = False, weights = None)
model = Sequential()
model.add(BatchNormalization(input_shape = [224,224,3]))
model.add(base_mobilenet_model)
model.add(BatchNormalization())
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))

# 2nd layer as Dense for 3-class classification,
model.add(Dense(NUM_CLASSES, activation = DENSE_LAYER_ACTIVATION,activity_regularizer=regularizers.l2(0.01)))



model.summary()

model.compile(optimizer = 'adam', loss = OBJECTIVE_FUNCTION,  metrics =  LOSS_METRICS)

image_size = IMAGE_RESIZE
shift = 0.2
# preprocessing_function is applied on each image but only after re-sizing & augmentation (resize => augment => pre-process)
# Each of the keras.application.resnet* preprocess_input MOSTLY mean BATCH NORMALIZATION (applied on each batch) stabilize the inputs to nonlinear activation functions
# Batch Normalization helps in faster convergence
                                                    #  featurewise_center=True,  featurewise_std_normalization=True,
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   width_shift_range=shift,
                                   height_shift_range=shift,
                                   horizontal_flip=True,
                                   vertical_flip=True,
                                   rotation_range=45,
                                   brightness_range=[0.2,1.0],
                                   zoom_range=[0.5,1.0]
                                   )

# flow_From_directory generates batches of augmented data (where augmentation can be color conversion, etc)
# Both train & valid folders must have NUM_CLASSES sub-folders
train_generator = data_generator.flow_from_directory(
        '/kaggle/input/grade-dataset/trainset/',
        target_size=(image_size, image_size),
        batch_size=BATCH_SIZE_TRAINING,
        class_mode='categorical')

validation_generator = data_generator.flow_from_directory(
        '/kaggle/input/grade-dataset/testset/',
        target_size=(image_size, image_size),
        batch_size=BATCH_SIZE_VALIDATION,
        class_mode='categorical')

# Max number of steps that these generator will have opportunity to process their source content
# len(train_generator) should be 'no. of available train images / BATCH_SIZE_TRAINING'
# len(valid_generator) should be 'no. of available valid images / BATCH_SIZE_VALIDATION'
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))

# Early stopping & checkpointing the best model in ../working dir & restoring that as our model for prediction
filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
cb_early_stopper = EarlyStopping(monitor = 'val_loss', mode='min', verbose=1, patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = filepath, monitor = 'val_loss', save_best_only = True, mode = 'auto')

fit_history = model.fit_generator(
        train_generator,
        steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
        epochs = NUM_EPOCHS,
        validation_data=validation_generator,
        validation_steps=STEPS_PER_EPOCH_VALIDATION,
        callbacks=[cb_checkpointer, cb_early_stopper]
)

I solved the problem by replacing this import:

from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint

with this one:

from keras.callbacks import EarlyStopping, ModelCheckpoint
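
The underlying issue is the mix of standalone keras and tensorflow.python.keras imports: the tf.keras ModelCheckpoint checks a private _ckpt_saved_epoch attribute that only the tf.keras training loop sets, so it breaks when attached to a model built and trained with standalone Keras. Keeping the model, layers, and callbacks in the same package avoids this. Below is a minimal sketch of the consistent-import setup; it uses a hypothetical toy Dense model and synthetic data instead of the MobileNet pipeline above, and the checkpoint filename is likewise just an illustration.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping, ModelCheckpoint  # same package as the model

# Toy 3-class model standing in for the MobileNet pipeline above
model = Sequential([Dense(3, activation='softmax', input_shape=(4,))])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Synthetic data, only to make the sketch self-contained
x = np.random.rand(64, 4)
y = np.eye(3)[np.random.randint(0, 3, 64)]

callbacks = [
    EarlyStopping(monitor='val_loss', mode='min', patience=2, verbose=1),
    ModelCheckpoint('weights-{epoch:02d}.hdf5', monitor='val_loss',
                    save_best_only=True, mode='auto'),
]

model.fit(x, y, validation_split=0.25, epochs=5, callbacks=callbacks)

The same reasoning applies in the other direction: importing Sequential, the layers, and the callbacks all from tensorflow.keras should also work, as long as nothing is mixed across the two packages.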
