ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0
ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0
我是 TF 和 Keras 的新手。我使用以下代码训练并保存了模型
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop
train_data_gen = ImageDataGenerator(rescale=1 / 255)
validation_data_gen = ImageDataGenerator(rescale=1 / 255)
# Flow training images in batches of 120 using train_data_gen generator
train_generator = train_data_gen.flow_from_directory(
'datasets/train/',
classes=['bad', 'good'],
target_size=(200, 200),
batch_size=120,
class_mode='binary')
validation_generator = validation_data_gen.flow_from_directory(
'datasets/valid/',
classes=['bad', 'good'],
target_size=(200, 200),
batch_size=19,
class_mode='binary',
shuffle=False)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(200, 200, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
# Only 1 output neuron. It will contain a value from 0-1
# where 0 for 1 class ('bad') and 1 for the other ('good')
tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.001),
metrics='accuracy')
model.fit(train_generator,
steps_per_epoch=10,
epochs=25,
verbose=1,
validation_data=validation_generator,
validation_steps=8)
print("Evaluating the model :")
model.evaluate(validation_generator)
print("Predicting :")
validation_generator.reset()
predictions = model.predict(validation_generator, verbose=1)
print(predictions)
model.save("models/saved")
然后使用
将此模型转换为tflite
import tensorflow as tf
def saved_model_to_tflite(model_path, quantize):
converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
model_saving_path = "models/converted/model.tflite"
if quantize:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
model_saving_path = "models/converted/model-quantized.tflite"
tflite_model = converter.convert()
with open(model_saving_path, 'wb') as f:
f.write(tflite_model)
然后使用
测试单个图像的模型
import tensorflow as tf
def run_tflite_model(tflite_file, test_image):
interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
interpreter.allocate_tensors()
print(interpreter.get_input_details())
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
interpreter.set_tensor(input_details["index"], test_image)
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
prediction = output.argmax()
return prediction
main.py
if __name__ == '__main__':
converted_model = "models/converted/model.tflite"
bad_image_path = "datasets/experiment/bad/b.png"
good_image_path = "datasets/experiment/good/g.png"
img = io.imread(bad_image_path)
resized = resize(img, (200, 200)).astype('float32')
prediction = run_tflite_model(converted_model, resized)
print(prediction)
但即使我将图像的大小调整为 200 x 200,我还是得到了
ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0.
如果我这样做 print(interpreter.get_input_details())
[{'name': 'serving_default_conv2d_input:0', 'index': 0, 'shape': array([ 1, 200, 200, 3], dtype=int32), 'shape_signature': array([ -1, 200, 200, 3], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}]
所以看起来输入的形状是 'shape': array([ 1, 200, 200, 3]
我确实得到了部分 200, 200, 3
看起来 1
是根据 docs?
的批量大小
如何从输入形状中删除批量大小?
您可以使用 expand_dims:
扩展维度,而不是删除图中的批量大小
test_image = np.expand_dims(test_image, axis=0)
对于android,您可以通过使用循环轻松地从浮点[32][32][3]输入数组准备浮点[1][32][32][3]输入数组复制值。
我是 TF 和 Keras 的新手。我使用以下代码训练并保存了模型
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2.rmsprop import RMSprop
train_data_gen = ImageDataGenerator(rescale=1 / 255)
validation_data_gen = ImageDataGenerator(rescale=1 / 255)
# Flow training images in batches of 120 using train_data_gen generator
train_generator = train_data_gen.flow_from_directory(
'datasets/train/',
classes=['bad', 'good'],
target_size=(200, 200),
batch_size=120,
class_mode='binary')
validation_generator = validation_data_gen.flow_from_directory(
'datasets/valid/',
classes=['bad', 'good'],
target_size=(200, 200),
batch_size=19,
class_mode='binary',
shuffle=False)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(200, 200, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
# Flatten the results to feed into a DNN
tf.keras.layers.Flatten(),
# 512 neuron hidden layer
tf.keras.layers.Dense(512, activation='relu'),
# Only 1 output neuron. It will contain a value from 0-1
# where 0 for 1 class ('bad') and 1 for the other ('good')
tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=0.001),
metrics='accuracy')
model.fit(train_generator,
steps_per_epoch=10,
epochs=25,
verbose=1,
validation_data=validation_generator,
validation_steps=8)
print("Evaluating the model :")
model.evaluate(validation_generator)
print("Predicting :")
validation_generator.reset()
predictions = model.predict(validation_generator, verbose=1)
print(predictions)
model.save("models/saved")
然后使用
将此模型转换为tfliteimport tensorflow as tf
def saved_model_to_tflite(model_path, quantize):
converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
model_saving_path = "models/converted/model.tflite"
if quantize:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
model_saving_path = "models/converted/model-quantized.tflite"
tflite_model = converter.convert()
with open(model_saving_path, 'wb') as f:
f.write(tflite_model)
然后使用
测试单个图像的模型import tensorflow as tf
def run_tflite_model(tflite_file, test_image):
interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
interpreter.allocate_tensors()
print(interpreter.get_input_details())
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
interpreter.set_tensor(input_details["index"], test_image)
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
prediction = output.argmax()
return prediction
main.py
if __name__ == '__main__':
converted_model = "models/converted/model.tflite"
bad_image_path = "datasets/experiment/bad/b.png"
good_image_path = "datasets/experiment/good/g.png"
img = io.imread(bad_image_path)
resized = resize(img, (200, 200)).astype('float32')
prediction = run_tflite_model(converted_model, resized)
print(prediction)
但即使我将图像的大小调整为 200 x 200,我还是得到了
ValueError: Cannot set tensor: Dimension mismatch. Got 3 but expected 4 for input 0.
如果我这样做 print(interpreter.get_input_details())
[{'name': 'serving_default_conv2d_input:0', 'index': 0, 'shape': array([ 1, 200, 200, 3], dtype=int32), 'shape_signature': array([ -1, 200, 200, 3], dtype=int32), 'dtype': <class 'numpy.float32'>, 'quantization': (0.0, 0), 'quantization_parameters': {'scales': array([], dtype=float32), 'zero_points': array([], dtype=int32), 'quantized_dimension': 0}, 'sparsity_parameters': {}}]
所以看起来输入的形状是 'shape': array([ 1, 200, 200, 3]
我确实得到了部分 200, 200, 3
看起来 1
是根据 docs?
如何从输入形状中删除批量大小?
您可以使用 expand_dims:
扩展维度,而不是删除图中的批量大小test_image = np.expand_dims(test_image, axis=0)
对于android,您可以通过使用循环轻松地从浮点[32][32][3]输入数组准备浮点[1][32][32][3]输入数组复制值。