CNN accuracy plotting
I am training a dataset with a convolutional neural network (CNN) and I want to plot its accuracy. I tried matplotlib earlier but could not get it to work, so how can I plot the accuracy for this code?
from matplotlib import pyplot
import os
import numpy as np
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf
tf.compat.v1.reset_default_graph()
convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 4, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
if os.path.exists('{}.meta'.format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print('model yuklendi!')
train = train_data[:-200]
test = train_data[-200:]
X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,3)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,3)
test_y = [i[1] for i in test]
model.fit({'input': X}, {'targets': Y}, n_epoch=1, validation_set=({'input': test_x}, {'targets': test_y}),
snapshot_step=40, show_metric=True, run_id=MODEL_NAME)
model.save(MODEL_NAME)
import matplotlib.pyplot as plt
history = model.fit({'input': X}, {'targets': Y}, n_epoch=1, validation_set=({'input': test_x}, {'targets': test_y}),
snapshot_step=40, show_metric=True, run_id=MODEL_NAME)
plt.plot(history.history['accuracy'])
The function below plots the training and validation loss in one figure and the training and validation accuracy in a second figure.
def tr_plot(tr_data, start_epoch):
    # Plot the training and validation data
    tacc = tr_data.history['accuracy']
    tloss = tr_data.history['loss']
    vacc = tr_data.history['val_accuracy']
    vloss = tr_data.history['val_loss']
    Epoch_count = len(tacc) + start_epoch
    Epochs = []
    for i in range(start_epoch, Epoch_count):
        Epochs.append(i + 1)
    index_loss = np.argmin(vloss)  # this is the epoch with the lowest validation loss
    val_lowest = vloss[index_loss]
    index_acc = np.argmax(vacc)
    acc_highest = vacc[index_acc]
    plt.style.use('fivethirtyeight')
    sc_label = 'best epoch= ' + str(index_loss + 1 + start_epoch)
    vc_label = 'best epoch= ' + str(index_acc + 1 + start_epoch)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
    axes[0].plot(Epochs, tloss, 'r', label='Training loss')
    axes[0].plot(Epochs, vloss, 'g', label='Validation loss')
    axes[0].scatter(index_loss + 1 + start_epoch, val_lowest, s=150, c='blue', label=sc_label)
    axes[0].set_title('Training and Validation Loss')
    axes[0].set_xlabel('Epochs')
    axes[0].set_ylabel('Loss')
    axes[0].legend()
    axes[1].plot(Epochs, tacc, 'r', label='Training Accuracy')
    axes[1].plot(Epochs, vacc, 'g', label='Validation Accuracy')
    axes[1].scatter(index_acc + 1 + start_epoch, acc_highest, s=150, c='blue', label=vc_label)
    axes[1].set_title('Training and Validation Accuracy')
    axes[1].set_xlabel('Epochs')
    axes[1].set_ylabel('Accuracy')
    axes[1].legend()
    plt.tight_layout()
    plt.show()
tr_plot(history,0)
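Note that tr_plot expects a Keras-style object whose .history attribute is a dict of metric lists, which tflearn's model.fit does not return. As a minimal sketch (the HistoryCallback adapter below is hypothetical, not part of the tflearn API), you can collect the per-epoch metrics with a callback and pass the collector straight to tr_plot, reusing X, Y, test_x, test_y, model and MODEL_NAME from the question:

import tflearn

# Hypothetical adapter: gathers per-epoch metrics into a Keras-style .history dict
class HistoryCallback(tflearn.callbacks.Callback):
    def __init__(self):
        self.history = {'accuracy': [], 'loss': [], 'val_accuracy': [], 'val_loss': []}

    def on_epoch_end(self, training_state):
        # training_state carries the metrics tracked by the trainer for the current epoch
        self.history['accuracy'].append(training_state.acc_value)
        self.history['loss'].append(training_state.loss_value)
        self.history['val_accuracy'].append(training_state.val_acc)
        self.history['val_loss'].append(training_state.val_loss)

history_cb = HistoryCallback()
model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          validation_set=({'input': test_x}, {'targets': test_y}),
          show_metric=True, run_id=MODEL_NAME, callbacks=history_cb)
tr_plot(history_cb, 0)  # history_cb.history mimics history.history from Keras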
You cannot get a history object back from model.fit in tflearn, but you can use a helper or a callback.
[Sample]:
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import os
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import matplotlib.pyplot as plt
import numpy as np
# add training - 1 DNN ==============================================================================================
tflearn.init_graph(num_cores=1, gpu_memory_fraction=1.0) # num_cores=8, gpu_memory_fraction=0.5
accuracy = [ ]
loss = [ ]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Class
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MonitorCallback(tflearn.callbacks.Callback):
    def __init__(self, api):
        self.my_monitor_api = api

    # def on_batch_end(training_state, snapshot, log={}):
    def on_sub_batch_end(self, training_state, train_index=0):
        try:
            # record the running accuracy and loss after every sub-batch
            accuracy.append(str(training_state.acc_value))
            loss.append(str(training_state.loss_value))
        except Exception as e:
            print(str(e))
monitorCallback = MonitorCallback(tflearn)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = r'F:\datasets\downloads\sample\cats_dogs\training'
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
MODEL_NAME = 'DEKDEE'
images = [ r'F:\datasets\downloads\sample\cats_dogs\training\train\cats\01 32x32.jpg',
           r'F:\datasets\downloads\sample\cats_dogs\training\train\cats\02 32x32.jpg',
           r'F:\datasets\downloads\sample\cats_dogs\training\train\cats\03 32x32.jpg',
           r'F:\datasets\downloads\sample\cats_dogs\training\train\cats\04 32x32.jpg',
           r'F:\datasets\downloads\sample\cats_dogs\training\train\cats\05 32x32.jpg',
           r'F:\datasets\downloads\sample\cats_dogs\training\train\cats\06 32x32.jpg',
           r'F:\datasets\downloads\sample\cats_dogs\training\train\cats\07 32x32.jpg' ]
labels = [ ]
labels.append( [ 1, 0 ,0 ,0 ,0 ,0 ,0 ] )
labels.append( [ 0, 1 ,0 ,0 ,0 ,0 ,0 ] )
labels.append( [ 0, 0 ,1 ,0 ,0 ,0 ,0 ] )
labels.append( [ 0, 0 ,0 ,1 ,0 ,0 ,0 ] )
labels.append( [ 0, 0 ,0 ,0 ,1 ,0 ,0 ] )
labels.append( [ 0, 0 ,0 ,0 ,0 ,1 ,0 ] )
labels.append( [ 0, 0 ,0 ,0 ,0 ,0 ,1 ] )
list_image = [ ]
for item in images:
    list_image.append(plt.imread(item))
convnet = input_data(shape=[None, 32, 32, 3], name='input')
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 128, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 32, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = conv_2d(convnet, 64, 3, activation='relu')
convnet = max_pool_2d(convnet, 3)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 7, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.0001, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
hist = model.fit({'input': list_image}, {'targets': labels}, n_epoch=15, validation_set=({'input': list_image}, {'targets': labels}), snapshot_step=40, show_metric=True, run_id=MODEL_NAME, callbacks=monitorCallback)
print( hist )
plt.plot( accuracy )
plt.plot( loss )
plt.show()
plt.close()
input('...')
[Output]:
--
Training Step: 7 | total loss: 1.86701 | time: 1.035s
| Adam | epoch: 007 | loss: 1.86701 - acc: 0.4050 | val_loss: 1.66956 - val_acc: 0.7143 -- iter: 7/7
--
Training Step: 8 | total loss: 1.82322 | time: 1.038s
| Adam | epoch: 008 | loss: 1.82322 - acc: 0.4183 | val_loss: 1.63044 - val_acc: 0.7143 -- iter: 7/7
--
Training Step: 9 | total loss: 1.72288 | time: 1.031s
| Adam | epoch: 009 | loss: 1.72288 - acc: 0.4994 | val_loss: 1.59387 - val_acc: 0.7143 -- iter: 7/7
--
Training Step: 10 | total loss: 1.67410 | time: 1.018s
| Adam | epoch: 010 | loss: 1.67410 - acc: 0.3925 | val_loss: 1.54277 - val_acc: 1.0000 -- iter: 7/7
--
Training Step: 11 | total loss: 1.57482 | time: 1.019s
| Adam | epoch: 011 | loss: 1.57482 - acc: 0.4773 | val_loss: 1.48715 - val_acc: 1.0000 -- iter: 7/7
--
Training Step: 12 | total loss: 1.59550 | time: 1.021s
| Adam | epoch: 012 | loss: 1.59550 - acc: 0.5196 | val_loss: 1.43774 - val_acc: 1.0000 -- iter: 7/7
--
Training Step: 13 | total loss: 1.68833 | time: 1.027s
| Adam | epoch: 013 | loss: 1.68833 - acc: 0.4194 | val_loss: 1.39414 - val_acc: 0.8571 -- iter: 7/7
--
Training Step: 14 | total loss: 1.59238 | time: 1.020s
| Adam | epoch: 014 | loss: 1.59238 - acc: 0.4816 | val_loss: 1.34550 - val_acc: 0.8571 -- iter: 7/7
--
Training Step: 15 | total loss: 1.52356 | time: 1.031s
| Adam | epoch: 015 | loss: 1.52356 - acc: 0.5167 | val_loss: 1.29574 - val_acc: 1.0000 -- iter: 7/7
--
None
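The final None comes from print(hist): model.fit really does return nothing, so the curves have to come from the accuracy and loss lists filled by the callback. Because the callback stores the values as strings, matplotlib plots them as categories; an optional follow-up (not part of the sample above) converts them to floats and labels the curves:

# Optional tweak: convert the collected strings to floats so the y-axis is numeric
acc_values = [float(a) for a in accuracy if a not in ('None', '')]
loss_values = [float(l) for l in loss if l not in ('None', '')]

plt.plot(acc_values, label='training accuracy')
plt.plot(loss_values, label='training loss')
plt.xlabel('training step (sub-batch)')
plt.legend()
plt.show()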