如何在多变量模型中得到平均 val_loss 和 val_accuracy?
How to get averaged val_loss and val_accuracy in multivariate model?
我建模过多元模型,上次问过类似的问题。
我知道如何获得平均损失值和准确度值,但我的模型仍然无法识别平均 val_loss 和 val_acc。
你能告诉我如何解决这个问题吗?
我附上下面的代码。谢谢
此代码用于获取平均损失和准确度。
class MergeMetrics(tf.keras.callbacks.Callback):
    """Average the per-output metrics into single merged log entries.

    NOTE(review): the substring matching here is fragile — 'mse' also
    matches every 'val_..._mse' key, and no key literally contains
    'val_mse' (validation keys look like 'val_<output>_mse'), so the
    merged validation entries end up empty/NaN. Verify before relying
    on them; see the exact-match variant further below.
    """

    def __init__(self, **kargs):
        super(MergeMetrics, self).__init__(**kargs)

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Mean of every logged value whose key contains the given substring.
        def _merged(substring):
            return np.mean([value for key, value in logs.items()
                            if substring in key])

        logs['merge_mse'] = _merged('mse')
        logs['merge_mae'] = _merged('mae')
        logs['merge_r_square'] = _merged('r_square')
        logs['val_merge_mse'] = _merged('val_mse')
        logs['val_merge_mae'] = _merged('val_mae')
        logs['val_merge_r_square'] = _merged('val_r_square')
这是我模型的代码和损失图。
model = Model(inputs=visible, outputs=listDense)

# One 'mse' loss per output head, keyed by the output layer name.
losses = {f"output{j + 1}": 'mse' for j in range(len(listDense))}
model.compile(optimizer='adam', loss=losses, metrics=["mse", "mae", r_square])

# Callback that averages the per-output metrics after every epoch.
checkpoint = MergeMetrics()

# Train with one target array per output head; validation data mirrors that.
hist = model.fit(
    X_tr,
    [listofdepth_tr[s] for s in range(len(listofdepth_tr))],
    use_multiprocessing=True,
    workers=6,
    epochs=100,
    callbacks=[checkpoint],
    verbose=0,
    validation_data=(X_te, [listofdepth_te[s] for s in range(len(listofdepth_te))]),
)
# -----------------------------------------------------------------------------
# Learning curves: merged R^2, merged mse metric, and total loss.
# -----------------------------------------------------------------------------
fig = plt.figure()

# Panel 1: merged R^2 (beware of scale — starts strongly negative).
r2_axis = fig.add_subplot(3, 1, 1)
r2_axis.plot(hist.history['merge_r_square'])
r2_axis.plot(hist.history['val_merge_r_square'])
r2_axis.set_title('Accuracy : model R^2')
r2_axis.set_ylabel('R^2')
r2_axis.legend(['train', 'test'], loc='upper left')

# Panel 2: merged mse metric.
mse_axis = fig.add_subplot(3, 1, 2)
mse_axis.plot(hist.history['merge_mse'])
mse_axis.plot(hist.history['val_merge_mse'])
mse_axis.set_title('Accuracy : mse')
mse_axis.set_ylabel('mse')
mse_axis.legend(['train', 'test'], loc='upper left')

# Panel 3: overall training / validation loss.
loss_axis = fig.add_subplot(3, 1, 3)
loss_axis.plot(hist.history['loss'])
loss_axis.plot(hist.history['val_loss'])
loss_axis.set_title('Loss : mse')
loss_axis.set_ylabel('mse')
loss_axis.set_xlabel('epoch')
loss_axis.legend(['train', 'test'], loc='upper left')
使用验证集时请注意:没有任何日志键包含子串 'val_mse',因为实际的键形如 'val_输出名_mse'。同时要小心,不要把训练的 mse 和验证的 mse 混在一起求平均。下面是正确的方法:
from string import digits # <=== import digits
def clear_name(output_name):
    """Strip every decimal digit from *output_name*.

    Turns per-output log keys such as 'dense_1_mse' into the digit-free
    form 'dense__mse', so all output heads share one comparable name.
    """
    strip_digits = str.maketrans('', '', digits)
    return output_name.translate(strip_digits)
class MergeMetrics(Callback):
    """Average metrics across all output heads into merged log entries.

    Keys are matched exactly after stripping digits (via clear_name),
    so training metrics and validation metrics are never mixed.
    """

    def __init__(self, **kargs):
        super(MergeMetrics, self).__init__(**kargs)

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Mean of every logged value whose digit-stripped key equals `target`.
        def _merged(target):
            return np.mean([value for key, value in logs.items()
                            if clear_name(key) == target])

        logs['merge_mse'] = _merged('dense__mse')
        logs['merge_mae'] = _merged('dense__mae')
        logs['val_merge_mse'] = _merged('val_dense__mse')
        logs['val_merge_mae'] = _merged('val_dense__mae')
# Toy data: 10 features, two independent regression targets.
features = np.random.uniform(0, 1, (1000, 10))
target_a = np.random.uniform(0, 1, 1000)
target_b = np.random.uniform(0, 1, 1000)

# Two-headed dense model sharing one hidden layer.
inputs = Input((10,))
hidden = Dense(32, activation='relu')(inputs)
head_a = Dense(1)(hidden)
head_b = Dense(1)(hidden)
two_head_model = Model(inputs, [head_a, head_b])
two_head_model.compile('adam', 'mae', metrics=['mse', 'mae'])

# Merge per-head metrics after every epoch.
merger = MergeMetrics()
hist = two_head_model.fit(features, [target_a, target_b], epochs=10,
                          callbacks=[merger], validation_split=0.1)

# Train vs validation curve of the merged mse.
plt.plot(hist.history['merge_mse'])
plt.plot(hist.history['val_merge_mse'])
plt.title('Accuracy : mse')
plt.ylabel('mse')
plt.legend(['train', 'test'], loc='upper left')
我建模过多元模型,上次问过类似的问题。 我知道如何获得平均损失值和准确度值,但我的模型仍然无法识别平均 val_loss 和 val_acc。 你能告诉我如何解决这个问题吗? 我附上下面的代码。谢谢
此代码用于获取平均损失和准确度。
class MergeMetrics(tf.keras.callbacks.Callback):
    """Average the per-output metrics into single merged log entries.

    NOTE(review): the substring matching here is fragile — 'mse' also
    matches every 'val_..._mse' key, and no key literally contains
    'val_mse' (validation keys look like 'val_<output>_mse'), so the
    merged validation entries end up empty/NaN. Verify before relying
    on them; see the exact-match variant further below.
    """

    def __init__(self, **kargs):
        super(MergeMetrics, self).__init__(**kargs)

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Mean of every logged value whose key contains the given substring.
        def _merged(substring):
            return np.mean([value for key, value in logs.items()
                            if substring in key])

        logs['merge_mse'] = _merged('mse')
        logs['merge_mae'] = _merged('mae')
        logs['merge_r_square'] = _merged('r_square')
        logs['val_merge_mse'] = _merged('val_mse')
        logs['val_merge_mae'] = _merged('val_mae')
        logs['val_merge_r_square'] = _merged('val_r_square')
这是我模型的代码和损失图。
model = Model(inputs=visible, outputs=listDense)

# One 'mse' loss per output head, keyed by the output layer name.
losses = {f"output{j + 1}": 'mse' for j in range(len(listDense))}
model.compile(optimizer='adam', loss=losses, metrics=["mse", "mae", r_square])

# Callback that averages the per-output metrics after every epoch.
checkpoint = MergeMetrics()

# Train with one target array per output head; validation data mirrors that.
hist = model.fit(
    X_tr,
    [listofdepth_tr[s] for s in range(len(listofdepth_tr))],
    use_multiprocessing=True,
    workers=6,
    epochs=100,
    callbacks=[checkpoint],
    verbose=0,
    validation_data=(X_te, [listofdepth_te[s] for s in range(len(listofdepth_te))]),
)
# -----------------------------------------------------------------------------
# Learning curves: merged R^2, merged mse metric, and total loss.
# -----------------------------------------------------------------------------
fig = plt.figure()

# Panel 1: merged R^2 (beware of scale — starts strongly negative).
r2_axis = fig.add_subplot(3, 1, 1)
r2_axis.plot(hist.history['merge_r_square'])
r2_axis.plot(hist.history['val_merge_r_square'])
r2_axis.set_title('Accuracy : model R^2')
r2_axis.set_ylabel('R^2')
r2_axis.legend(['train', 'test'], loc='upper left')

# Panel 2: merged mse metric.
mse_axis = fig.add_subplot(3, 1, 2)
mse_axis.plot(hist.history['merge_mse'])
mse_axis.plot(hist.history['val_merge_mse'])
mse_axis.set_title('Accuracy : mse')
mse_axis.set_ylabel('mse')
mse_axis.legend(['train', 'test'], loc='upper left')

# Panel 3: overall training / validation loss.
loss_axis = fig.add_subplot(3, 1, 3)
loss_axis.plot(hist.history['loss'])
loss_axis.plot(hist.history['val_loss'])
loss_axis.set_title('Loss : mse')
loss_axis.set_ylabel('mse')
loss_axis.set_xlabel('epoch')
loss_axis.legend(['train', 'test'], loc='upper left')
使用验证集时请注意:没有任何日志键包含子串 'val_mse',因为实际的键形如 'val_输出名_mse'。同时要小心,不要把训练的 mse 和验证的 mse 混在一起求平均。下面是正确的方法:
from string import digits # <=== import digits
def clear_name(output_name):
    """Strip every decimal digit from *output_name*.

    Turns per-output log keys such as 'dense_1_mse' into the digit-free
    form 'dense__mse', so all output heads share one comparable name.
    """
    strip_digits = str.maketrans('', '', digits)
    return output_name.translate(strip_digits)
class MergeMetrics(Callback):
    """Average metrics across all output heads into merged log entries.

    Keys are matched exactly after stripping digits (via clear_name),
    so training metrics and validation metrics are never mixed.
    """

    def __init__(self, **kargs):
        super(MergeMetrics, self).__init__(**kargs)

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Mean of every logged value whose digit-stripped key equals `target`.
        def _merged(target):
            return np.mean([value for key, value in logs.items()
                            if clear_name(key) == target])

        logs['merge_mse'] = _merged('dense__mse')
        logs['merge_mae'] = _merged('dense__mae')
        logs['val_merge_mse'] = _merged('val_dense__mse')
        logs['val_merge_mae'] = _merged('val_dense__mae')
# Toy data: 10 features, two independent regression targets.
features = np.random.uniform(0, 1, (1000, 10))
target_a = np.random.uniform(0, 1, 1000)
target_b = np.random.uniform(0, 1, 1000)

# Two-headed dense model sharing one hidden layer.
inputs = Input((10,))
hidden = Dense(32, activation='relu')(inputs)
head_a = Dense(1)(hidden)
head_b = Dense(1)(hidden)
two_head_model = Model(inputs, [head_a, head_b])
two_head_model.compile('adam', 'mae', metrics=['mse', 'mae'])

# Merge per-head metrics after every epoch.
merger = MergeMetrics()
hist = two_head_model.fit(features, [target_a, target_b], epochs=10,
                          callbacks=[merger], validation_split=0.1)

# Train vs validation curve of the merged mse.
plt.plot(hist.history['merge_mse'])
plt.plot(hist.history['val_merge_mse'])
plt.title('Accuracy : mse')
plt.ylabel('mse')
plt.legend(['train', 'test'], loc='upper left')