Does running training multiple times affect a CNN's image classification accuracy?

I am a beginner. I am using the program below in a Jupyter notebook to do image classification on MNIST with a CNN model. When I run "model.fit_generator" twice (or more) in succession, is the accuracy of the second run affected by the result of the first run? Looking at the printed results, the second run seems to reach a better accuracy. Please give me some advice...

# import libraries
import keras
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sn
import shutil
import tensorflow as tf
from datetime import datetime, timedelta, timezone
from keras import backend as ke
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# MNIST
mnist=keras.datasets.mnist
(x_train,y_train),(x_test,y_test)=mnist.load_data()
(x_train,y_train),(x_test,y_test)=(x_train[:80],y_train[:80]),(x_test[:20], y_test[:20])
#(x_train,y_train),(x_test,y_test)=(x_train[:160],y_train[:160]),(x_test[:40], y_test[:40])
#(x_train,y_train),(x_test,y_test)=(x_train[:800],y_train[:800]),(x_test[:200], y_test[:200])
#(x_train,y_train),(x_test,y_test)=(x_train[:8000],y_train[:8000]),(x_test[:2000], y_test[:2000])
x_train=x_train.reshape(x_train.shape[0],28,28,1)
x_test=x_test.reshape(x_test.shape[0],28,28,1)
x_train=x_train/255
x_test=x_test/255
print("x_train",x_train.shape)
print("x_test",x_test.shape)

# model
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(28,28,1), padding='same'))
model.add(BatchNormalization(axis=-1))  # must be added via model.add, otherwise the layer is discarded
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.20))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.20))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()

# model compile
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# model fit
model.fit(x_train,y_train,epochs=40)

# evaluate on test data
loss,acc=model.evaluate(x_test,y_test,verbose=2)
print('loss:','{:.3f}'.format(loss),'accuracy:','{:.3f}'.format(acc))

# ImageDataGenerator
datagen = ImageDataGenerator( 
    featurewise_center=False,
    samplewise_center=False, 
    featurewise_std_normalization=False, 
    samplewise_std_normalization=False, 
    zca_whitening=False,  
    rotation_range=10, 
    width_shift_range=0.1, 
    height_shift_range=0.1,  
    zoom_range=[0.1, 2.0],  # zoom_range is [lower, upper]
    horizontal_flip=False, 
    vertical_flip=False)  
datagen.fit(x_train) 

# note: applying random augmentation to the validation/test data is unusual;
# normally the test set is left untouched so that evaluations stay comparable
datagen_test = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,  
    featurewise_std_normalization=False, 
    samplewise_std_normalization=False,  
    zca_whitening=False,  
    rotation_range=10, 
    width_shift_range=0.1,  
    height_shift_range=0.1,
    zoom_range=[0.1, 2.0],  # zoom_range is [lower, upper]
    horizontal_flip=False,
    vertical_flip=False)
datagen_test.fit(x_test)

# parameters: batch sizes are chosen so that steps_per_epoch covers each subset once per epoch
epochs = 100
iteration_train = 5
iteration_test = 2
batch_size_train = int(x_train.shape[0] / iteration_train)
batch_size_test = int(x_test.shape[0] / iteration_test)

gen_train_flow = datagen.flow(x_train, y_train, batch_size=batch_size_train) 
gen_test_flow  = datagen_test.flow(x_test, y_test, batch_size=batch_size_test)
history = model.fit(gen_train_flow,
                    steps_per_epoch=iteration_train,
                    epochs=epochs,
                    validation_data=gen_test_flow,
                    validation_steps=iteration_test)

# evaluate on test data
loss,acc=model.evaluate(x_test,y_test,verbose=2)
print('loss:','{:.3f}'.format(loss),'accuracy:','{:.3f}'.format(acc))

# graph for training
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs_range = range(1, len(acc) + 1)
plt.plot(epochs_range, acc, 'b', label='Training accuracy')
plt.plot(epochs_range, val_acc, 'r', label='Validation accuracy')
plt.legend()
plt.show()

To answer the question directly: yes. Calling model.fit (or fit_generator) again does not reset the model; the second call continues training from the weights the first call left behind, so the accuracy of the second run does build on the result of the first. Keep in mind, though, that the more you train a CNN on the same data, the more it starts to learn noise.
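
If you want the second run to be independent of the first, the weights (and the optimizer state) have to be reset between runs. A minimal sketch, reusing the model, data, and compile settings from the question:

# snapshot the freshly initialized weights right after building and compiling the model
initial_weights = model.get_weights()

model.fit(x_train, y_train, epochs=40)      # run 1

model.set_weights(initial_weights)          # restore initial weights; without this,
                                            # run 2 simply resumes where run 1 stopped
model.compile(optimizer='adam',             # recompiling also resets Adam's internal state
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=40)      # run 2, now independent of run 1

Even then, two runs will not be bit-identical unless random seeds are fixed, because dropout and the data augmentation are stochastic.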

As a result, training accuracy will keep improving while validation accuracy eventually drops, because the model no longer generalizes well to unseen data.
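
One common way to stop training before this happens is the EarlyStopping callback that the question already imports. A minimal sketch (the monitor key and patience value are illustrative choices; restore_best_weights requires a reasonably recent Keras version):

# stop when validation accuracy has not improved for 10 epochs,
# and roll back to the best weights seen so far
early_stop = EarlyStopping(monitor='val_accuracy',
                           patience=10,
                           restore_best_weights=True)

history = model.fit(gen_train_flow,
                    steps_per_epoch=iteration_train,
                    epochs=epochs,
                    validation_data=gen_test_flow,
                    validation_steps=iteration_test,
                    callbacks=[early_stop])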

See the bias-variance tradeoff for more background on this.