ValueError: Metric should be a callable, received
I'm not sure whether I'm doing this correctly, but I want to use logcosh
as my loss, so I wrapped it in a class as shown below:
class Logcosh(tf.keras.losses.Loss):
    def __init__(self):
        super().__init__()

    def call(self, y_true, y_pred):
        return tf.keras.losses.logcosh(y_true, y_pred)
Then I call it in my code:
LR = 0.0001
optim = keras.optimizers.Adam(LR)
dice_loss_se2 = Logcosh()
mae = tf.keras.losses.MeanAbsoluteError()
metrics = [mae, sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5), dice_loss_se2]
model.compile(optimizer=optim, loss=dice_loss_se2, metrics=metrics)
LR = 0.0001
optim = keras.optimizers.Adam(LR)
train_gen = DataGen(train_ids, train_path, image_size=image_size, batch_size=batch_size)
valid_gen = DataGen(valid_ids, train_path, image_size=image_size, batch_size=batch_size)
train_steps = len(train_ids)//batch_size
valid_steps = len(valid_ids)//batch_size
history = model.fit_generator(train_gen, validation_data=valid_gen, steps_per_epoch=train_steps,
                              validation_steps=valid_steps, epochs=epochs)
But I get the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-59-fb657b903f55> in <module>
23 valid_steps = len(valid_ids)//batch_size
24
---> 25 history =model.fit_generator(train_gen, validation_data=valid_gen, steps_per_epoch=train_steps, validation_steps=valid_steps,
26 epochs=epochs)
~/.local/lib/python3.8/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
2207 'Please use `Model.fit`, which supports generators.',
2208 stacklevel=2)
-> 2209 return self.fit(
2210 generator,
2211 steps_per_epoch=steps_per_epoch,
~/.local/lib/python3.8/site-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
~/.local/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/training.py", line 864, in train_step
return self.compute_metrics(x, y, y_pred, sample_weight)
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/training.py", line 957, in compute_metrics
self.compiled_metrics.update_state(y, y_pred, sample_weight)
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 438, in update_state
self.build(y_pred, y_true)
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 358, in build
self._metrics = tf.__internal__.nest.map_structure_up_to(y_pred, self._get_metric_objects,
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 484, in _get_metric_objects
return [self._get_metric_object(m, y_t, y_p) for m in metrics]
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 484, in <listcomp>
return [self._get_metric_object(m, y_t, y_p) for m in metrics]
File "/storage/home/mm8755/.local/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 538, in _get_metric_object
raise ValueError(
ValueError: Metric should be a callable, received: <__main__.Logcosh object at 0x2b1db47964c0>
model.fit_generator is quite old, but the idea is the same (Model.fit accepts the same generators). The error itself comes from the metrics list: every entry passed to metrics must be a string, a plain callable, or a tf.keras.metrics.Metric instance, so the Logcosh() Loss object you put there is rejected with "Metric should be a callable". Keep it only as the loss. Log-cosh maps the prediction error onto a logarithmic scale, which is one way to express similarity when only limited information is available, and you can see this logarithmic behaviour even on a classification task.
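A minimal sketch of the compile call for your snippet (using your names and assuming sm is segmentation_models): keep the Logcosh() instance as the loss, and give metrics only callables or Metric objects, for example tf.keras.metrics.LogCoshError() instead of the Logcosh instance:

lossfn = Logcosh()
metrics = [
    tf.keras.metrics.MeanAbsoluteError(),
    sm.metrics.IOUScore(threshold=0.5),
    sm.metrics.FScore(threshold=0.5),
    tf.keras.metrics.LogCoshError(),  # a Metric object; the plain function tf.keras.losses.logcosh also works
]
model.compile(optimizer=keras.optimizers.Adam(0.0001), loss=lossfn, metrics=metrics)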
[Sample]:
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
import numpy as np
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class and Definition
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class Logcosh(tf.keras.losses.Loss):
    # custom wrapper around log-cosh: log(cosh(y_pred - y_true)), roughly quadratic for
    # small errors and close to absolute error for large ones
    def __init__(self):
        super().__init__()

    def call(self, y_true, y_pred):
        return tf.keras.losses.logcosh(y_true, y_pred)
def gen():
    train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    train_generator = train_generator.flow_from_directory(
        directory,
        target_size=(32, 32),
        batch_size=BATCH_SIZE,
        class_mode='categorical',  # None # categorical # binary
        subset='training')
    return train_generator
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
LR = 0.000001
directory = "F:\\datasets\\downloads\\Actors\\train\\"
train_steps = 33
val_steps = 1
epochs = 1500
BATCH_SIZE = 33
IMG_SIZE = (32, 32)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"

if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)

log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
train_generator = gen()
val_generator = train_generator
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    # stop training once the 'accuracy' metric reaches 0.97
    def on_epoch_end(self, epoch, logs={}):
        if logs['accuracy'] >= 0.97:
            self.model.stop_training = True

custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Reshape((256, 32 * 32)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(196, return_sequences=True, return_state=False)),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(196)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(192, activation='relu'),
    tf.keras.layers.Dense(2),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Adam(LR)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = Logcosh()# <__main__.Logcosh object at 0x000001EFAFBB8DF0>
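# Illustrative sanity check (assumption, with hypothetical example values): the custom
# Logcosh class should return the same value as the built-in tf.keras.losses.LogCosh.
_y_true = tf.constant([[0.0, 1.0]])
_y_pred = tf.constant([[0.3, 0.6]])
print(lossfn(_y_true, _y_pred).numpy(), tf.keras.losses.LogCosh()(_y_true, _y_pred).numpy())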
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit_generator(train_generator, epochs=epochs, verbose=1, callbacks=[custom_callback])
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label_actual = [ 'Pikaploy', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file(file)
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32, 32], method='nearest')
    image = tfio.experimental.color.rgba_to_rgb(image)
    list_file.append(image)

for file in files_2.take(5):
    image = tf.io.read_file(file)
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32, 32], method='nearest')
    image = tfio.experimental.color.rgba_to_rgb(image)
    list_file.append(image)
plt.figure(figsize=(6, 6))
plt.title("Cat emotions recognition")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True)
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])

    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))

plt.show()
input('...')
[Output]: