How to get TFLite model output in C++?
I have a tflite model for mask detection with a sigmoid layer that outputs a value between 0 [mask] and 1 [no_mask].
I inspected the input and output nodes with Netron to check their shapes and indices.
I tested inference with the model in Python, and it works fine:
# A simple inference pipeline
import numpy as np
import tensorflow as tf
import cv2
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="efficient_net.tflite")
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Rescale to [1,32,32,1].
input_shape = input_details[0]['shape']
img = cv2.imread("nomask.jpg")
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
input_data = img_gray[..., tf.newaxis]
input_data = tf.image.resize(input_data, [32, 32])
input_data = input_data[tf.newaxis, ...]
input_data = np.array(input_data, dtype=np.float32)
# setting input
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data[0][0])
I tried to do the same thing in C++, but I get 0 or no output at all:
#include <iostream>
#include <cstdio>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "opencv2/opencv.hpp"
using namespace cv;
#define TFLITE_MINIMAL_CHECK(x)                                   \
    if (!(x))                                                     \
    {                                                             \
        fprintf(stderr, "Error at %s:%d\n", __FILE__, __LINE__);  \
        exit(1);                                                  \
    }
int main(int argc, char* argv[])
{
    if (argc != 2)
    {
        fprintf(stderr, "minimal <tflite model>\n");
        return 1;
    }
    const char* filename = argv[1];
    // read image file
    cv::Mat img = cv::imread("D:\\nomask.png");
    // convert to float; BGR -> Grayscale
    cv::Mat inputImg;
    img.convertTo(inputImg, CV_32FC1);
    cv::cvtColor(inputImg, inputImg, cv::COLOR_BGR2GRAY);
    // resize image as model input
    cv::resize(inputImg, inputImg, cv::Size(32, 32));
    // Load model
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(filename);
    TFLITE_MINIMAL_CHECK(model != nullptr);
    // Build the interpreter with the InterpreterBuilder.
    // Note: all Interpreters should be built with the InterpreterBuilder,
    // which allocates memory for the Interpreter and does various set up
    // tasks so that the Interpreter can read the provided model.
    tflite::ops::builtin::BuiltinOpResolver resolver;
    tflite::InterpreterBuilder builder(*model, resolver);
    std::unique_ptr<tflite::Interpreter> interpreter;
    builder(&interpreter);
    TFLITE_MINIMAL_CHECK(interpreter != nullptr);
    // Allocate tensor buffers.
    TFLITE_MINIMAL_CHECK(interpreter->AllocateTensors() == kTfLiteOk);
    // Fill input buffers
    // TODO(user): Insert code to fill input tensors.
    // Note: The buffer of the input tensor with index `i` of type T can
    // be accessed with `T* input = interpreter->typed_input_tensor<T>(i);`
    float* input = interpreter->typed_input_tensor<float>(0);
    input = inputImg.ptr<float>(0);
    // Run inference
    TFLITE_MINIMAL_CHECK(interpreter->Invoke() == kTfLiteOk);
    printf("\n\n=== Post-invoke Interpreter State ===\n");
    float* output = interpreter->typed_output_tensor<float>(149);
    std::cout << output[0];
    return 0;
}
I tried changing the output index to 0 instead of 149, but then I always get a small output value, i.e. it predicts a mask no matter what the input is (this does not happen with the Python inference).
What am I doing wrong?
The code now works fine with these changes:
memcpy(input, inputImg.data, 32 * 32 * sizeof(float));
instead of
input = inputImg.ptr<float>(0);
and using index 0 for the output:
float* output = interpreter->typed_output_tensor<float>(0);
The index here refers to the position of the output tensor in the model's list of outputs, not to its tensor index in the graph.
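For reference, a minimal sketch of the corrected input/output handling, meant to drop into main() above after AllocateTensors(); it reuses the headers and the TFLITE_MINIMAL_CHECK macro already shown, and the image path and the 32x32 single-channel float32 input shape are assumptions taken from the question:

// Prepare a 32x32 single-channel float image (path is a placeholder).
cv::Mat img = cv::imread("D:\\nomask.png", cv::IMREAD_GRAYSCALE);
cv::Mat inputImg;
cv::resize(img, inputImg, cv::Size(32, 32));
inputImg.convertTo(inputImg, CV_32FC1);
// Copy the pixels into the buffer owned by the interpreter.
// Reassigning the `input` pointer only changes the local variable;
// the input tensor itself would stay zero-filled.
float* input = interpreter->typed_input_tensor<float>(0);
memcpy(input, inputImg.data, 32 * 32 * sizeof(float));
// Run inference.
TFLITE_MINIMAL_CHECK(interpreter->Invoke() == kTfLiteOk);
// typed_output_tensor(i) takes the position of the tensor in
// interpreter->outputs(), not the graph tensor index shown by Netron (149).
float* output = interpreter->typed_output_tensor<float>(0);
std::cout << "sigmoid score: " << output[0] << std::endl;

If in doubt about which index maps to which tensor, interpreter->inputs() and interpreter->outputs() return the graph tensor indices in order, and tflite::PrintInterpreterState(interpreter.get()) from optional_debug_tools.h dumps every tensor for inspection.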