How to plot learning curves for each trial using the keras-tuner

I am using the Keras Tuner for model selection of a neural network on a regression task, and I want to plot the learning curves (loss and validation loss) for every trial of the random search. How can I do this?

Here is my code:

import tensorflow as tf
from keras_tuner import RandomSearch

def model_builder(hp):
  model = tf.keras.Sequential()

  layers = hp.Choice('layers', values=[1,2,3,4,5])
  units = hp.Choice('units', values=[1,2,4,8,16,32,64,128,256,512,1024])
  hp_learning_rate = hp.Choice('learning_rate', values=[1e-1, 1e-2, 1e-3, 1e-4])

  # 'layers' hidden ReLU layers, then a linear head for the two regression targets
  for i in range(layers):
    model.add(tf.keras.layers.Dense(units=units, activation='relu'))

  model.add(tf.keras.layers.Dense(2, activation='linear'))
  model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=hp_learning_rate),
                loss='mse')

  return model

tuner = RandomSearch(model_builder,
                     objective="val_loss",
                     max_trials=50)

stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
tuner.search(X_train, y_train, epochs=200, validation_split=0.2, callbacks=[stop_early])

You just need to implement a custom Keras Tuner and set verbose=0 to be able to see the plots after each trial; otherwise they get cleared from the output. Try something like this for full flexibility:

Custom Keras Tuner:

import tensorflow as tf
import keras_tuner as kt
import numpy as np
from matplotlib import pyplot as plt

class CustomTuner(kt.Tuner):
    def run_trial(self, trial, train_ds, val_ds, *args, **kwargs):
        # Print the usual hyperparameter table for this trial
        self._display.show_hyperparameter_table(trial)
        self._display.trial_number += 1
        hp = trial.hyperparameters
        model = self.hypermodel.build(hp)
        
        optimizer = model.optimizer
        train_loss_metric = tf.keras.metrics.Mean()
        valid_loss_metric = tf.keras.metrics.Mean()

        loss_fn = tf.keras.losses.MeanSquaredError()
        # The datasets arrive unbatched from search(), so batch them here
        train_ds = train_ds.batch(32)
        val_ds = val_ds.batch(32)

        def run_train_step(data):
          x = data[0]
          y = data[1]
          with tf.GradientTape() as tape:
            logits = model(x)
            loss = loss_fn(y, logits)

          gradients = tape.gradient(loss, model.trainable_variables)
          optimizer.apply_gradients(zip(gradients, model.trainable_variables))
          
          train_loss_metric.update_state(loss)
          return loss
        
        def run_valid_step(data):
          x = data[0]
          y = data[1]
          logits = model(x)
          loss = loss_fn(y, logits)

          valid_loss_metric.update_state(loss)
          return loss

        val_losses = []
        train_losses = []
        for epoch in range(5):
          tf.print("Epoch: {}".format(epoch))
          self.on_epoch_begin(trial, model, epoch, logs={})
          for batch, data in enumerate(train_ds):
              self.on_batch_begin(trial, model, batch, logs={})
              batch_loss = float(run_train_step(data))
              self.on_batch_end(trial, model, batch, logs={"loss": batch_loss})
              if batch == 6:
                # Keep the demo fast: stop after 7 batches per epoch
                loss = train_loss_metric.result()
                tf.print("Batches: {}, Loss: {}".format(batch + 1, loss))
                break
                
          for batch, data in enumerate(val_ds):
              self.on_batch_begin(trial, model, batch, logs={})
              batch_loss = float(run_valid_step(data))
              self.on_batch_end(trial, model, batch, logs={"val_loss": batch_loss})
              if batch == 6:
                loss = valid_loss_metric.result()
                tf.print("Batches: {}, Val Loss: {}".format(batch + 1, loss))
                break

          # Report both metrics to the oracle in a single epoch-end call
          epoch_loss = float(train_loss_metric.result())
          val_epoch_loss = float(valid_loss_metric.result())
          self.on_epoch_end(trial, model, epoch,
                            logs={"loss": epoch_loss, "val_loss": val_epoch_loss})

          train_losses.append(epoch_loss)
          val_losses.append(val_epoch_loss)

          train_loss_metric.reset_states()
          valid_loss_metric.reset_states()

      
        plt.plot(train_losses)
        plt.plot(val_losses)
        plt.title('Model Loss For Trial {}'.format(self._display.trial_number))
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.show()
        tf.print("Ending Trail {}".format(self._display.trial_number))
        return super(CustomTuner, self).run_trial(trial, train_ds, validation_data=val_ds, *args, **kwargs)

Dummy data and parameters:

tuner = CustomTuner(
    oracle=kt.oracles.RandomSearch(
        objective=kt.Objective("val_loss", "min"), max_trials=5
    ),
    hypermodel=model_builder
)

X_train = np.random.random((224, 2))
y_train = np.random.random((224, 2))
valx_train = np.random.random((224, 2))
valy_train = np.random.random((224, 2))
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
val_ds = tf.data.Dataset.from_tensor_slices((valx_train, valy_train))
tuner.search(train_ds, val_ds, callbacks=[stop_early], verbose=0)

Output:

Hyperparameter    |Value             |Best Value So Far 
layers            |1                 |?                 
units             |128               |?                 
learning_rate     |0.0001            |?                 
Epoch: 0
Batches: 7, Loss: 0.6267251372337341
Batches: 7, Val Loss: 0.6261463165283203
Epoch: 1
Batches: 7, Loss: 0.6248489022254944
Batches: 7, Val Loss: 0.6242721676826477
Epoch: 2
Batches: 7, Loss: 0.6229791045188904
Batches: 7, Val Loss: 0.6224031448364258
Epoch: 3
Batches: 7, Loss: 0.6211144328117371
Batches: 7, Val Loss: 0.6205392479896545
Epoch: 4
Batches: 7, Loss: 0.619255006313324
Batches: 7, Val Loss: 0.6186805963516235

Ending Trial 1
Hyperparameter    |Value             |Best Value So Far 
layers            |3                 |1                 
units             |1024              |128               
learning_rate     |0.0001            |0.0001            
Epoch: 0
Batches: 7, Loss: 0.2655337154865265
Batches: 7, Val Loss: 0.22062525153160095
Epoch: 1
Batches: 7, Loss: 0.1646299660205841
Batches: 7, Val Loss: 0.14632494747638702
Epoch: 2
Batches: 7, Loss: 0.11420594155788422
Batches: 7, Val Loss: 0.11366432905197144
Epoch: 3
Batches: 7, Loss: 0.09950900077819824
Batches: 7, Val Loss: 0.10782861709594727
Epoch: 4
Batches: 7, Loss: 0.10018070787191391
Batches: 7, Val Loss: 0.10787512362003326

Ending Trial 2
...
...

A model based on the code you posted in the question:

def model_builder(hp):
  model = tf.keras.Sequential()

  layers = hp.Choice('layers', values=[1,2,3,4,5])
  units = hp.Choice('units', values=[1,2,4,8,16,32,64,128,256,512,1024])
  hp_learning_rate = hp.Choice('learning_rate', values=[1e-1, 1e-2, 1e-3, 1e-4])
  
  for i in range(layers):
    model.add(tf.keras.layers.Dense(units=units, activation='relu'))

  model.add(tf.keras.layers.Dense(2, activation='linear'))
  model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=hp_learning_rate),
                loss='mse')

  return model

You could probably simplify the code, but I think you get the idea.
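For instance, if the manual training loop isn't needed, keras-tuner forwards any callbacks passed to search() into each trial's fit(), so a plain Keras callback can record and plot the curves. A minimal sketch (PlotTrialCurves is a hypothetical helper, not part of the code above):

import tensorflow as tf
from matplotlib import pyplot as plt

class PlotTrialCurves(tf.keras.callbacks.Callback):
    # Hypothetical helper: record per-epoch losses, then plot them
    # when this trial's fit() run ends.
    def on_train_begin(self, logs=None):
        self.losses, self.val_losses = [], []

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses.append(logs.get('loss'))
        self.val_losses.append(logs.get('val_loss'))

    def on_train_end(self, logs=None):
        plt.plot(self.losses)
        plt.plot(self.val_losses)
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        plt.show()

tuner.search(X_train, y_train, epochs=200, validation_split=0.2,
             callbacks=[stop_early, PlotTrialCurves()], verbose=0)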

I managed to do it with Plotly by using a Keras callback.

Imports:

from keras.callbacks import Callback

# OPTIONAL
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping

First you need to create a custom callback that will be called at the end of each training epoch of the model.

# MODULE-LEVEL STORES SHARED BY THE CALLBACK
step = 0
index = []
history_loss = []
history_accuracy = []

# CUSTOM PRINT FUNCTION IF YOU NEED IT
def print_step(s):
    print(str(s))

# CUSTOM CALLBACK
class KerasLogger(Callback):
    def __init__(self, print_fcn=print_step):
        Callback.__init__(self)
        self.print_fcn = print_fcn

    def on_epoch_end(self, epoch, logs=None):
        # I KNOW IT'S BAD... 'step' is module-level state, so declare it global
        global step

        # STORE NEW VALUES EACH EPOCH
        index.append(step)
        step += 1
        for k, v in logs.items():
            if k == 'val_loss':
                history_loss.append(v)
            if k == 'val_accuracy':
                history_accuracy.append(v)

        # PLOT YOUR GRAPH HERE
        plot_my_ia(index, history_accuracy, history_loss)
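plot_my_ia is the plotting routine left out above; here is a minimal Plotly sketch of what it might look like (the name and arguments are simply the ones used in the callback):

import plotly.graph_objects as go

def plot_my_ia(index, history_accuracy, history_loss):
    # Hypothetical reconstruction: redraw both curves after every epoch
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=index, y=history_loss, mode='lines', name='val_loss'))
    fig.add_trace(go.Scatter(x=index, y=history_accuracy, mode='lines', name='val_accuracy'))
    fig.update_layout(xaxis_title='epoch', yaxis_title='value')
    fig.show()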

Second, you need to bind it to your model using the callbacks parameter:

# CUSTOM CALLBACK : THE ONE YOU NEED
custom_logger = KerasLogger()

# EXAMPLE OF A KERAS CALLBACK FOR EARLY STOPPING
early_stopping_logger = EarlyStopping('val_loss', patience=10, mode='min', restore_best_weights=True)

# EXAMPLE OF MODELCHECKPOINT
best_model_logger = ModelCheckpoint(path_file, monitor='val_loss', save_best_only=True, save_weights_only=True, mode='min')

# CREATE YOUR MODEL
model = create_my_smart_model(...your parameters ...)
model.compile(...your parameters ...)

# BIND THE CALLBACKS
model.fit(...your other parameters ... , callbacks=[custom_logger, early_stopping_logger, best_model_logger])

Note that the callbacks must be passed to fit() inside a list ([]).

Result:

Plotly graph of accuracy / loss updated each step