[Unhandled promise rejection: Error: Size(150528) must match the product of shape 1]
I am using a custom model: I built it with transfer learning from MobileNetV2 and then converted it to the TF.js format.
The model accepts inputs of shape (batch_size, 224, 224, 3).
I am not sure where the size (150528) comes from, since it does not appear anywhere in my model.summary().
Link to model summary as it is too long to post here
Model code in Python:
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D, Flatten, Dense
from tensorflow.keras.models import Model

baseModel = MobileNetV2(include_top=False,
                        input_shape=INPUT_SHAPE,  # INPUT_SHAPE = (224, 224, 3)
                        weights='imagenet')

headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(512, activation="relu")(headModel)
headModel = Dense(3, activation="softmax")(headModel)

# place the head FC model on top of the base model
model = Model(inputs=baseModel.input, outputs=headModel)

# loop over all layers in the base model and freeze them so they will
# *not* be updated during the training process
for layer in baseModel.layers:
    layer.trainable = False
Prediction code in JavaScript for React Native:
// get predictions from the model
const getPrediction = async tensor => {
  tensor = tensor.reshape(1, 224, 224, 3);
  if (!tensor) {
    console.log("Prediction not found");
    return;
  }
  const prediction = await loadedModel.predict(tensor, 32);
  console.log(`Predictions: ${JSON.stringify(prediction)}`);
  if (!prediction || prediction.length === 0) {
    return;
  }
  // Only take the predictions with a probability of 30% and greater
  if (prediction[0].probability > 0.3) {
    // Stop looping
    cancelAnimationFrame(requestAnimationFrameId);
    setPredictionFound(true);
    setModelPrediction(prediction[0].className);
    tensor.dispose();
  }
};
// Load the model from the models folder
const loadModel = async () => {
  const model = await tf.loadLayersModel(
    bundleResourceIO(modelJSON, modelWeights)
  );
  console.log("Model loaded!");
  return model;
};
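In the snippets here, loadedModel is the model kept in component state, as the comment in handleCameraStream notes. A minimal sketch of that wiring, assuming a React hooks setup (the custom hook name and structure below are illustrative and not from the original code):

import React, { useEffect, useState } from "react";
import * as tf from "@tensorflow/tfjs";

// Illustrative only: load the model once on mount and keep it in state,
// so the prediction loop can read it as `loadedModel`.
const useLoadedModel = () => {
  const [loadedModel, setLoadedModel] = useState(null);

  useEffect(() => {
    const init = async () => {
      await tf.ready();               // make sure the TF.js backend is ready
      const model = await loadModel(); // loadModel() as defined above
      setLoadedModel(model);
    };
    init();
  }, []);

  return loadedModel;
};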
// Handling the camera input and converting it into tensors to be used in the
// model for predictions (the camera wiring that supplies imageAsTensors is
// sketched after this code)
const handleCameraStream = imageAsTensors => {
  const loop = async () => {
    // loadedModel is the custom model I created and stored in state
    if (loadedModel !== null) {
      if (frameCount % makePredictionsEveryNFrames === 0) {
        const imageTensor = imageAsTensors.next().value;
        const faces = await getPrediction(imageTensor);
      }
    }
    frameCount += 1;
    // requestAnimationFrameId = 0
    // makePredictionsEveryNFrames = 3
    frameCount = frameCount % makePredictionsEveryNFrames;
    requestAnimationFrameId = requestAnimationFrame(loop);
  };
  // loop infinitely to constantly make predictions
  loop();
};
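For context, the imageAsTensors iterator consumed above is the one that the TensorCamera component passes to its onReady callback. A minimal sketch of that wiring, assuming the expo-camera wrapper from @tensorflow/tfjs-react-native (the texture dimensions and styling are illustrative):

import React from "react";
import { Camera } from "expo-camera";
import { cameraWithTensors } from "@tensorflow/tfjs-react-native";

// Wrap expo-camera so it yields frames as tensors.
const TensorCamera = cameraWithTensors(Camera);

// Minimal component that resizes each frame to the model's 224x224x3 input
// and hands the tensor iterator to handleCameraStream via onReady.
const ModelCamera = ({ onStream }) => (
  <TensorCamera
    style={{ width: 224, height: 224 }}
    type={Camera.Constants.Type.back}
    // Underlying camera texture size (device dependent; values are illustrative).
    cameraTextureHeight={1920}
    cameraTextureWidth={1080}
    resizeHeight={224}
    resizeWidth={224}
    resizeDepth={3}
    onReady={onStream}   // e.g. onStream={handleCameraStream}
    autorender={true}
  />
);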
This turned out to be a problem with the shape of the input I was passing to predict: reshape() expects the target shape as a single array argument, so reshape(1, 224, 224, 3) used only the first value (1) as the shape, which cannot hold the 150528 (= 224 × 224 × 3) elements of the image tensor. It was solved with the following code:
const model = await loadedModel;
const prediction = model.predict(tensor.reshape([1, 224, 224, 3]));
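For completeness, a minimal sketch of how the fix slots into the prediction function. The reshape call is the one from the fix above; reading the output with data() and disposing the tensors are standard tf.js usage, and the class names are placeholders since the real labels are not in the post:

// Placeholder labels for the 3-unit softmax head (not from the original post).
const CLASS_NAMES = ["class_0", "class_1", "class_2"];

const getPrediction = async imageTensor => {
  const model = await loadedModel;

  // reshape() takes the target shape as one array argument;
  // passing the dimensions separately caused the original error.
  const input = imageTensor.reshape([1, 224, 224, 3]);

  // For a LayersModel this returns a tensor of shape [1, 3].
  const output = model.predict(input);
  const probabilities = await output.data(); // Float32Array of length 3

  // Index of the most likely class.
  let best = 0;
  for (let i = 1; i < probabilities.length; i++) {
    if (probabilities[i] > probabilities[best]) best = i;
  }

  tf.dispose([imageTensor, input, output]); // free the tensors

  // Keep the same 30% threshold as the original code.
  if (probabilities[best] > 0.3) {
    cancelAnimationFrame(requestAnimationFrameId);
    setPredictionFound(true);
    setModelPrediction(CLASS_NAMES[best]);
  }
};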