学习时访问 keras / tensorflow 中的准确性
Access accuracy in keras / tensorflow while learning
我想访问神经网络的准确性(或损失),以便根据它进行训练(“课程学习”)。
有没有一种方法可以访问自定义层中的模型精度?
一种方法是创建自定义回调，并在每个纪元（epoch）结束后记录准确性（或损失）。然后，您可以在自定义层的训练期间访问此变量。一个缺点是：您必须在 model.compile 中设置 run_eagerly=True，此方法才能起作用：
import tensorflow as tf

# Shared mutable state: the callback appends one entry per finished epoch,
# and the custom layer defined below reads the latest entry during training.
result_dic = {"epochs": []}

# After every epoch, record the (1-based) epoch number and its accuracy.
# The accuracy is stringified purely for printing; keep the float if you
# need it for numeric curriculum decisions.
logging_callback = tf.keras.callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs:
        result_dic["epochs"].append({
            'epoch': epoch + 1,
            'acc': str(logs['acc'])
        }))
class Linear(tf.keras.layers.Layer):
    """Dense layer (``inputs @ w + b``) that inspects training metrics.

    During ``call`` it prints the most recent epoch record stored in the
    module-level ``result_dic``. Reading plain Python state from inside
    ``call`` only works when the model is compiled with ``run_eagerly=True``.

    Args:
        units: Output dimensionality of the layer.
        input_dim: Expected size of the last input axis.
    """

    def __init__(self, units=32, input_dim=32):
        super().__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        # Print the latest epoch's stats, if at least one epoch has finished.
        if result_dic['epochs']:
            tf.print(result_dic['epochs'][-1])
        return tf.matmul(inputs, self.w) + self.b
# Build a tiny demo model: the custom Linear layer followed by a sigmoid head.
inputs = tf.keras.layers.Input((16,))
linear_layer = Linear(32, 16)
x = linear_layer(inputs)
outputs = tf.keras.layers.Dense(1, 'sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
# run_eagerly=True is required so Linear.call can read the Python dict.
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['acc'], run_eagerly=True)
# Random toy data: 5 samples of 16 features, binary integer labels.
x = tf.random.normal((5, 16))
y = tf.random.uniform((5,), maxval=2, dtype=tf.int32)
model.fit(x, y, batch_size=2, epochs=4, callbacks=[logging_callback])
Epoch 1/4
3/3 [==============================] - 0s 15ms/step - loss: 0.8525 - acc: 0.0000e+00
Epoch 2/4
{'acc': '0.0', 'epoch': 1}
1/3 [=========>....................] - ETA: 0s - loss: 0.7647 - acc: 0.5000{'acc': '0.0', 'epoch': 1}
{'acc': '0.0', 'epoch': 1}
3/3 [==============================] - 0s 19ms/step - loss: 0.7834 - acc: 0.2000
Epoch 3/4
{'acc': '0.20000000298023224', 'epoch': 2}
1/3 [=========>....................] - ETA: 0s - loss: 0.7253 - acc: 0.5000{'acc': '0.20000000298023224', 'epoch': 2}
{'acc': '0.20000000298023224', 'epoch': 2}
3/3 [==============================] - 0s 18ms/step - loss: 0.7239 - acc: 0.2000
Epoch 4/4
{'acc': '0.20000000298023224', 'epoch': 3}
1/3 [=========>....................] - ETA: 0s - loss: 0.7091 - acc: 0.5000{'acc': '0.20000000298023224', 'epoch': 3}
{'acc': '0.20000000298023224', 'epoch': 3}
3/3 [==============================] - 0s 19ms/step - loss: 0.6662 - acc: 0.6000
<keras.callbacks.History at 0x7f5319f6a910>
除了字典,您还可以使用简单的列表:
import tensorflow as tf

# Accuracy history; seeded with 0.0 so the layer always has a value to read
# during the very first epoch.
results = [0.0]


class LossAccCallback(tf.keras.callbacks.Callback):
    """Append each epoch's accuracy to the module-level ``results`` list."""

    def on_epoch_end(self, epoch, logs=None):
        # ``.append`` mutates in place, so no ``global`` declaration is needed.
        results.append(logs['acc'])
class Linear(tf.keras.layers.Layer):
    """Dense layer (``inputs @ w + b``) that prints the last known accuracy.

    During ``call`` it prints the most recent value in the module-level
    ``results`` list. Reading plain Python state from inside ``call`` only
    works when the model is compiled with ``run_eagerly=True``.

    Args:
        units: Output dimensionality of the layer.
        input_dim: Expected size of the last input axis.
    """

    def __init__(self, units=32, input_dim=32):
        super().__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        # ``results`` always holds at least the 0.0 seed value.
        tf.print(results[-1])
        return tf.matmul(inputs, self.w) + self.b
epochs = 4

# Assemble and train the model; Linear prints the latest accuracy each time
# it runs, which requires run_eagerly=True in compile().
inputs = tf.keras.layers.Input((16,))
linear_layer = Linear(32, 16)
x = linear_layer(inputs)
outputs = tf.keras.layers.Dense(1, 'sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['acc'], run_eagerly=True)
# Random toy data: 5 samples of 16 features, binary integer labels.
x = tf.random.normal((5, 16))
y = tf.random.uniform((5,), maxval=2, dtype=tf.int32)
model.fit(x, y, batch_size=2, epochs=epochs, callbacks=[LossAccCallback()])
我想访问神经网络的准确性(或损失),以便根据它进行训练(“课程学习”)。 有没有一种方法可以访问自定义层中的模型精度?
一种方法是创建自定义回调，并在每个纪元（epoch）结束后记录准确性（或损失）。然后，您可以在自定义层的训练期间访问此变量。一个缺点是：您必须在 model.compile 中设置 run_eagerly=True，此方法才能起作用：
import tensorflow as tf

# Shared mutable state: the callback appends one entry per finished epoch,
# and the custom layer defined below reads the latest entry during training.
result_dic = {"epochs": []}

# After every epoch, record the (1-based) epoch number and its accuracy.
# The accuracy is stringified purely for printing; keep the float if you
# need it for numeric curriculum decisions.
logging_callback = tf.keras.callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs:
        result_dic["epochs"].append({
            'epoch': epoch + 1,
            'acc': str(logs['acc'])
        }))
class Linear(tf.keras.layers.Layer):
    """Dense layer (``inputs @ w + b``) that inspects training metrics.

    During ``call`` it prints the most recent epoch record stored in the
    module-level ``result_dic``. Reading plain Python state from inside
    ``call`` only works when the model is compiled with ``run_eagerly=True``.

    Args:
        units: Output dimensionality of the layer.
        input_dim: Expected size of the last input axis.
    """

    def __init__(self, units=32, input_dim=32):
        super().__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        # Print the latest epoch's stats, if at least one epoch has finished.
        if result_dic['epochs']:
            tf.print(result_dic['epochs'][-1])
        return tf.matmul(inputs, self.w) + self.b
# Build a tiny demo model: the custom Linear layer followed by a sigmoid head.
inputs = tf.keras.layers.Input((16,))
linear_layer = Linear(32, 16)
x = linear_layer(inputs)
outputs = tf.keras.layers.Dense(1, 'sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
# run_eagerly=True is required so Linear.call can read the Python dict.
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['acc'], run_eagerly=True)
# Random toy data: 5 samples of 16 features, binary integer labels.
x = tf.random.normal((5, 16))
y = tf.random.uniform((5,), maxval=2, dtype=tf.int32)
model.fit(x, y, batch_size=2, epochs=4, callbacks=[logging_callback])
Epoch 1/4
3/3 [==============================] - 0s 15ms/step - loss: 0.8525 - acc: 0.0000e+00
Epoch 2/4
{'acc': '0.0', 'epoch': 1}
1/3 [=========>....................] - ETA: 0s - loss: 0.7647 - acc: 0.5000{'acc': '0.0', 'epoch': 1}
{'acc': '0.0', 'epoch': 1}
3/3 [==============================] - 0s 19ms/step - loss: 0.7834 - acc: 0.2000
Epoch 3/4
{'acc': '0.20000000298023224', 'epoch': 2}
1/3 [=========>....................] - ETA: 0s - loss: 0.7253 - acc: 0.5000{'acc': '0.20000000298023224', 'epoch': 2}
{'acc': '0.20000000298023224', 'epoch': 2}
3/3 [==============================] - 0s 18ms/step - loss: 0.7239 - acc: 0.2000
Epoch 4/4
{'acc': '0.20000000298023224', 'epoch': 3}
1/3 [=========>....................] - ETA: 0s - loss: 0.7091 - acc: 0.5000{'acc': '0.20000000298023224', 'epoch': 3}
{'acc': '0.20000000298023224', 'epoch': 3}
3/3 [==============================] - 0s 19ms/step - loss: 0.6662 - acc: 0.6000
<keras.callbacks.History at 0x7f5319f6a910>
除了字典,您还可以使用简单的列表:
import tensorflow as tf

# Accuracy history; seeded with 0.0 so the layer always has a value to read
# during the very first epoch.
results = [0.0]


class LossAccCallback(tf.keras.callbacks.Callback):
    """Append each epoch's accuracy to the module-level ``results`` list."""

    def on_epoch_end(self, epoch, logs=None):
        # ``.append`` mutates in place, so no ``global`` declaration is needed.
        results.append(logs['acc'])
class Linear(tf.keras.layers.Layer):
    """Dense layer (``inputs @ w + b``) that prints the last known accuracy.

    During ``call`` it prints the most recent value in the module-level
    ``results`` list. Reading plain Python state from inside ``call`` only
    works when the model is compiled with ``run_eagerly=True``.

    Args:
        units: Output dimensionality of the layer.
        input_dim: Expected size of the last input axis.
    """

    def __init__(self, units=32, input_dim=32):
        super().__init__()
        w_init = tf.random_normal_initializer()
        self.w = tf.Variable(
            initial_value=w_init(shape=(input_dim, units), dtype="float32"),
            trainable=True,
        )
        b_init = tf.zeros_initializer()
        self.b = tf.Variable(
            initial_value=b_init(shape=(units,), dtype="float32"), trainable=True
        )

    def call(self, inputs):
        # ``results`` always holds at least the 0.0 seed value.
        tf.print(results[-1])
        return tf.matmul(inputs, self.w) + self.b
epochs = 4

# Assemble and train the model; Linear prints the latest accuracy each time
# it runs, which requires run_eagerly=True in compile().
inputs = tf.keras.layers.Input((16,))
linear_layer = Linear(32, 16)
x = linear_layer(inputs)
outputs = tf.keras.layers.Dense(1, 'sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['acc'], run_eagerly=True)
# Random toy data: 5 samples of 16 features, binary integer labels.
x = tf.random.normal((5, 16))
y = tf.random.uniform((5,), maxval=2, dtype=tf.int32)
model.fit(x, y, batch_size=2, epochs=epochs, callbacks=[LossAccCallback()])