UnimplementedError with 1 channel image classification
I'm new to CNNs and I ran into this error:
UnimplementedError: Fused conv implementation does not support grouped convolutions for now. [[node sequential_3/activation_15/Relu (defined at <ipython-input-37-0f4d43f688ae>:5) ]] [Op:__inference_test_function_5422]
I'm following the code from this video, which works for 3-channel images, but I need to use 1-channel images and it fails. Here is my code; I changed this particular line to:
model.add(Conv2D(32, (3, 3), input_shape = (256, 256, 1)))
and I also added color_mode = 'grayscale' in
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(img_width, img_height), color_mode = 'grayscale', batch_size=batch_size, class_mode='binary')
However, that did not get rid of the error. Can someone help me figure out what is going wrong? The full code is below:
## import libraries here
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
img_width, img_height = 256, 256
train_dir = './train'
valid_dir = './validation'
train_samples = 35
valid_samples = 15
epochs = 7
batch_size = 5
if K.image_data_format() == 'channels_first':
    shape = (1, img_width, img_height)
else:
    shape = (img_width, img_height, 1)
train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='binary')
valid_generator = test_datagen.flow_from_directory(valid_dir, target_size=(img_width, img_height),
                                                   batch_size=batch_size, class_mode='binary')
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape = shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
## add more layers if you want
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit_generator(train_generator,
                    steps_per_epoch=train_samples // batch_size,
                    epochs=epochs,
                    validation_data=valid_generator,
                    validation_steps=valid_samples // batch_size)
The problem is most likely a mismatch between the number of channels the model expects and the number of channels the dataset actually delivers (note that the traceback points at __inference_test_function, i.e. the validation step, and the posted valid_generator does not set color_mode='grayscale', so it still yields 3-channel batches). I suggest explicitly converting the images to grayscale before they are fed to the model:
import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
train_ds = tf.data.Dataset.from_generator(
    lambda: img_gen.flow_from_directory(data_dir, batch_size=32, shuffle=True),
    output_types=(tf.float32, tf.float32))

def convert_to_grayscale(image, label):
    return tf.image.rgb_to_grayscale(image), label
images, _ = next(iter(train_ds.take(1)))
image = images[0]
print('Before conversion --> ', image.shape)
train_ds = train_ds.map(convert_to_grayscale)
images, _ = next(iter(train_ds.take(1)))
image = images[0]
print('After conversion --> ', image.shape)
Found 3670 files belonging to 5 classes.
Using 2936 files for training.
Before conversion --> (256, 256, 3)
After conversion --> (256, 256, 1)
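A side note on from_generator: with output_types alone, the dataset carries no static shape information, so a channel mismatch only surfaces at run time. If you are on TF 2.4 or newer, you can optionally pass output_signature so the element shapes are known up front. A minimal sketch, assuming the default class_mode='categorical' (5 one-hot classes) and the default target_size of (256, 256):
train_ds = tf.data.Dataset.from_generator(
    lambda: img_gen.flow_from_directory(data_dir, batch_size=32, shuffle=True),
    output_signature=(
        tf.TensorSpec(shape=(None, 256, 256, 3), dtype=tf.float32),
        tf.TensorSpec(shape=(None, 5), dtype=tf.float32)))
# After the grayscale map, element shapes become (None, 256, 256, 1)
train_ds = train_ds.map(convert_to_grayscale)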
Here is a complete working example:
import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
train_generator = img_gen.flow_from_directory(data_dir, target_size=(256, 256),
                                              batch_size=32,
                                              class_mode='sparse')

def convert_to_grayscale(images, labels):
    return tf.image.rgb_to_grayscale(images), labels
train_ds = tf.data.Dataset.from_generator(lambda: train_generator, output_types=(tf.float32, tf.float32))
train_ds = train_ds.map(convert_to_grayscale)
num_classes = 5
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu', input_shape=(256, 256, 1)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(num_classes)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
model.fit(train_ds, epochs=10)
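As an aside: if your own images are already single-channel on disk, you can skip the rgb_to_grayscale map entirely and let the generators load 1-channel batches directly. The important part is that color_mode='grayscale' must be set on both the training and the validation generator, otherwise the validation pass still receives 3-channel images. A minimal sketch using the variable names from your code:
train_generator = train_datagen.flow_from_directory(
    train_dir, target_size=(img_width, img_height),
    color_mode='grayscale', batch_size=batch_size, class_mode='binary')
valid_generator = test_datagen.flow_from_directory(
    valid_dir, target_size=(img_width, img_height),
    color_mode='grayscale', batch_size=batch_size, class_mode='binary')
# Both generators now yield batches of shape (batch_size, 256, 256, 1),
# matching input_shape=(256, 256, 1) in the first Conv2D layer.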