层 sequential_10 的输入 0 与层不兼容::预期 min_ndim=4,发现 ndim=2

Input 0 of layer sequential_10 is incompatible with the layer: : expected min_ndim=4, found ndim=2

在重塑 xtraindata 和 xtest 数据之前,出现错误: “层 sequential_10 的输入 0 与层不兼容:预期 min_ndim=4,发现 ndim=2。”。在按顺序将 xtraindata 和 xtestdata 重塑为 (1400,24,24,1) 和 (600,24,24,1) 之后。然后我得到这样的错误: “不兼容的形状:[32,1] 与 [32,6,6,1] [[node mean_squared_error/SquaredDifference(定义在 C:\Users\User\Documents\car_person.py:188)]] [Op:__inference_test_function_7945]

函数调用堆栈: test_function” 我无法使评估函数在创建的模型上运行。如何让测试数据兼容模型?

import numpy as np
import matplotlib.pyplot as plt
import os
import time
import cv2
import pandas as pd
import tensorflow as tf
import itertools as it
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
  except RuntimeError as e:
    print(e)


#gpu_options=K.tf.GPUOptions(per_process_gpu_memory_fraction=0.35)

path = "C:/Users/User/Desktop/tunel_data"
training_data=[]

def create_training_data(training_data, path):
    categories = ["tunel_data_other", "tunel_data_car"]
    for category in categories:
        path=os.path.join(path, category)
        for img in os.listdir(path):
            print(img)
            if category=="tunel_data_other":
                class_num= 0
                #image=Image.open(img)
                #new_image = image.resize((50, 50))
                #new_image.save('car'+img.index())
                #try:
                image_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)/255
                new_array = cv2.resize(image_array, (24, 24))
                print(new_array.shape)
                training_data.append([new_array, class_num])
                #except:
                    #pass
            elif category=="tunel_data_car":
                class_num = 1
                #image=Image.open(img)
                #new_image = image.resize((50, 50))
                #new_image.save('person'+img.index())
                #try:
                image_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)/255
                new_array = cv2.resize(image_array, (24, 24))
                print(new_array.shape)
                training_data.append([new_array, class_num])
                #except:
                    #pass
        path = "C:/Users/User/Desktop/tunel_data"
    return training_data

create_training_data(training_data, path)

x=[]
y=[]

for i in range(len(training_data)):
    x.append(training_data[i][0])
    y.append(training_data[i][1])
#print(x)
#print(y)
     
x = np.array(x).reshape(2000, 576)
"""
principle_features = PCA(n_components=250)
feature = principle_features.fit_transform(x)
"""
feature = x
label = y

feature_df = pd.DataFrame(feature)

#df = DataFrame (People_List,columns=['First_Name','Last_Name','Age'])

label_df = pd.DataFrame(label)


data = pd.concat([feature_df, label_df], axis=1).to_csv('complete.csv')


data = pd.read_csv("complete.csv")

data = data.sample(frac=1).reset_index(drop=True)

print(data)

x_test, x_train, y_test, y_train = train_test_split(x, y, test_size=0.7, random_state=65)
xtraindata=pd.DataFrame(data=x_train[:,:])
xtestdata=pd.DataFrame(data=x_test[:,:])
print(xtraindata)

ytraindata=pd.DataFrame(data=y_train[:])
ytestdata=pd.DataFrame(data=y_test[:])
print(ytraindata)

xtraindata = np.asarray(xtraindata)
ytraindata = np.asarray(ytraindata)
xtestdata = np.asarray(xtestdata)
ytestdata = np.asarray(ytestdata)
x=np.asarray(x)
y=np.asarray(y)


xtraindata = xtraindata.reshape(1400,24,24,1)
xtestdata = xtestdata.reshape(600,24,24,1)

activation = ["tanh", "relu", "sigmoid", "softmax"]
input_size1 = range(10)
input_size2 = range(10)
k_scores = []
in_size = []

possible = list(it.permutations(activation, 4))

for c in possible:
    for i in input_size1:
        for a in input_size2:
            model = tf.keras.Sequential([tf.keras.layers.Conv2D(256, kernel_size=(3,3), padding='same', activation='relu'),
                                         tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
                                         tf.keras.layers.Conv2D(512, kernel_size=(3,3), padding='same', activation='relu'),
                                         tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
                                         tf.keras.layers.Dense(250, activation=c[0]),
                                         tf.keras.layers.Dense(i, activation=c[1]),
                                         tf.keras.layers.Dense(a, activation=c[2]),
                                         tf.keras.layers.Dense(1, activation=c[3])])
            model.compile(optimizer='sgd', loss='mse')
            val_loss = model.evaluate(xtestdata, ytestdata, verbose=1)
            k_scores.append(val_loss)
            in_size.append([i,a])
            
print(k_scores)
print("Best activation functions for each layer:", possible[(k_scores.index((min(k_scores)))) % len(possible)],
      "/n Best input sizes:", "840", in_size[k_scores.index((min(k_scores)))][0], in_size[k_scores.index((min(k_scores)))][1], "1")

model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(250, activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][0]))
model.add(tf.keras.layers.Dense(in_size[k_scores.index((min(k_scores)))][0], activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][1]))
model.add(tf.keras.layers.Dense(in_size[k_scores.index((min(k_scores)))][1], activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][2]))
model.add(tf.keras.layers.Dense(1, activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][3]))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy", "mse"])
model.fit(x, y, batch_size=16, epochs=5)
predictions = model.predict([x_test])
print(predictions)
print(predictions.shape)

输出层大小不同。你想要大小 (32, 1) 但模型的输出是 (32, 6, 6, 1)

MaxPooling2DDense() 之间插入 Flatten() 也许这项工作很好。

这是小费。 .evaluate 方法仅适用于经过训练的模型。你应该先使用.fit