ONNXRuntime program crashes after running twice
I am writing a program that should run in VS2013. It takes two images, runs them through an ONNX model in C++, and returns the model's output.
Since I am working with images, I write the code in VS2019, build it into a DLL, and use that DLL from VS2013.
I was able to get this working for one image and it worked well. When I moved to two images, running the program once works fine, but if I execute it twice in a row it crashes with an assertion while trying to delete a std::wstring. I tried to trace it back to its origin, but the closest I could get is Microsoft Visual Studio\2019\Enterprise\VC\Tools\MSVC\14.29.30133\crt\src\vcruntime, in the function:
_CRT_SECURITYCRITICAL_ATTRIBUTE
void __CRTDECL operator delete(void* const block, size_t const) noexcept
{
    operator delete(block);
}
The code in VS2019 is:
#include <iostream>
#include <fstream>
#include <Windows.h>
// additional headers and namespaces this code relies on:
#include <array>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>      // cv::Mat, imread, resize
#include <onnxruntime_cxx_api.h>   // Ort::Env, Ort::Session, Ort::Value
using namespace cv;
using namespace std;
Mat MeanOverChannels(Mat m) {
    /*
     * input: cv::Mat with more than one channel (=3)
     * output: cv::Mat with one channel, that is the average over channels
     */
    Size size = m.size();
    int channels = m.channels();
    Mat res(size, CV_64FC1);
    for (int i = 0; i < size.height; i++) {
        for (int j = 0; j < size.width; j++) {
            double avg = 0;
            auto cur = m.at<Vec3b>(i, j);
            for (int c = 0; c < channels; c++)
            {
                avg += cur[c];
            }
            avg /= channels;
            res.at<double>(i, j) = avg;
        }
    }
    return res;
}
cv::Mat GetInputNormalized(string imgpath
    , int& original_height, int& original_width
    , int input_height, int input_width) {
    /*
     * input: path to image, and references to save the original size
     * output: image from the path after resizing and normalizing
     */
    //read input
    Mat img = imread(imgpath, IMREAD_COLOR); // can use IMREAD_UNCHANGED
    Size s = img.size();
    original_height = s.height;
    original_width = s.width;
    //mean over axis=2, you can comment out if it is not needed for you
    Mat img_mean = MeanOverChannels(img);
    //resize down
    int down_width = input_width;
    int down_height = input_height;
    Mat resized_down;
    resize(img_mean, resized_down, Size(down_width, down_height), 0, 0, INTER_LINEAR); // interpolation is the sixth argument; fx/fy stay 0 when dsize is given
    //can return resized_down, from here you can customize the input
    Mat img2float;
    resized_down.convertTo(img2float, CV_64FC1);
    //normalize pixels : p->(p-127.5)/127.5
    Mat imgNorm = (img2float - 127.5) / 127.5;
    return imgNorm;
}
bool Net::RunNet(std::wstring modelName, std::string inPath1, std::string inPath2, float& iRes) {
    /*********this model assumes two inputs and one output************/
    /////path to the onnx model/////
    wchar_t buffer[MAX_PATH];
    GetModuleFileNameW(NULL, buffer, MAX_PATH);
    const wchar_t* endChar = LR"(\/)";
    std::wstring::size_type pos = std::wstring(buffer).find_last_of(endChar);
    std::wstring modelPath;
    modelPath = std::wstring(buffer).substr(0, pos) + endChar + modelName;
    bool success = false;
    /////variables to run the model/////
    Ort::Env env;
    Ort::Session session{ env, modelPath.c_str(), Ort::SessionOptions{} };
    Ort::AllocatorWithDefaultOptions allocator;
    auto memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    auto* inputName1 = session.GetInputName(0, allocator);
    auto* inputName2 = session.GetInputName(1, allocator);
    auto* outputName = session.GetOutputName(0, allocator);
    array inputNames = { inputName1, inputName2 };
    array outputNames = { outputName };
    auto inputShape1 = session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape();
    auto inputShape2 = session.GetInputTypeInfo(1).GetTensorTypeAndShapeInfo().GetShape();
    for (int i = 0; i < inputShape1.size(); ++i) {
        inputShape1[i] = inputShape1[i] > 0 ? inputShape1[i] : 1; //in case inputShape[0] is -1 (None in python)
    }
    for (int i = 0; i < inputShape2.size(); ++i) {
        inputShape2[i] = inputShape2[i] > 0 ? inputShape2[i] : 1; //in case inputShape[0] is -1 (None in python)
    }
    int input_height1 = inputShape1[1], input_width1 = inputShape1[2];
    int input_height2 = inputShape2[1], input_width2 = inputShape2[2];
    /////getting input to the net/////
    Mat img1, img2;
    int origHeight1, origWidth1
        , origHeight2, origWidth2;
    img1 = GetInputNormalized(inPath1
        , origHeight1, origWidth1
        , input_height1, input_width1
    );
    img2 = GetInputNormalized(inPath2
        , origHeight2, origWidth2
        , input_height2, input_width2
    );
    vf inputValues1 = MatTo1DVector(img1);
    vf inputValues2 = MatTo1DVector(img2);
    //// create the input tensor (this is not a deep copy!)
    auto inputOnnxTensor1 = Ort::Value::CreateTensor<float>(memoryInfo,
        inputValues1.data(), inputValues1.size(),
        inputShape1.data(), inputShape1.size());
    auto inputOnnxTensor2 = Ort::Value::CreateTensor<float>(memoryInfo,
        inputValues2.data(), inputValues2.size(),
        inputShape2.data(), inputShape2.size());
    array input_tensor = { std::move(inputOnnxTensor1), std::move(inputOnnxTensor2) };
    ///////Executing the model/////
    auto outputValues = session.Run(
        Ort::RunOptions{ nullptr }, // e.g. set a verbosity level only for this run
        inputNames.data(), input_tensor.data(), input_tensor.size(), // input to set
        outputNames.data(), 1); // output to take
    auto& output1 = outputValues[0];
    const auto* floats = output1.GetTensorMutableData<float>();
    const auto floatsCount = output1.GetTensorTypeAndShapeInfo().GetElementCount();
    float res = *floats;
    iRes = res;
    allocator.Free(inputName1);
    allocator.Free(inputName2);
    allocator.Free(outputName);
    success = true;
    return success;
}
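For reference, vf and MatTo1DVector are helpers defined elsewhere in the project and are not shown above; a minimal sketch of what they might look like, assuming vf is an alias for std::vector<float> and the single-channel CV_64FC1 Mat is flattened row by row:

using vf = std::vector<float>;

vf MatTo1DVector(const cv::Mat& m) {
    // flatten a single-channel double Mat into a float vector (row-major)
    vf v;
    v.reserve(static_cast<size_t>(m.rows) * m.cols);
    for (int i = 0; i < m.rows; ++i)
        for (int j = 0; j < m.cols; ++j)
            v.push_back(static_cast<float>(m.at<double>(i, j)));
    return v;
}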
What I export is a function of NetFactory that creates a unique_ptr of Net. Through this factory we create an instance of Net, _net, and then I call _net->RunNetSimCompare(modelName, inPath1, inPath2, res); and return res after execution.
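For context, the export looks roughly like this; apart from the NetFactory, Net, and RunNet names taken from the code above, everything here (the export macro, the header layout, the factory method name) is illustrative:

#include <memory>
#include <string>

class Net {
public:
    virtual ~Net() = default;
    virtual bool RunNet(std::wstring modelName, std::string inPath1,
                        std::string inPath2, float& iRes) = 0;
};

class __declspec(dllexport) NetFactory {
public:
    static std::unique_ptr<Net> CreateNet(); // builds the concrete Net inside the DLL
};

// usage from the VS2013 program:
// auto _net = NetFactory::CreateNet();
// float res = 0.f;
// _net->RunNet(modelName, inPath1, inPath2, res);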
Where is the problem?
For anyone who runs into the same problem: changing the function's signature to the following seems to fix it:
bool Net::RunNet(std::wstring& modelName, std::string& inPath1, std::string& inPath2, float& iRes)
Passing the strings by reference rather than by value presumably avoids copying and destroying them across the DLL boundary, where the VS2013 caller and the VS2019 DLL use different CRT heaps.
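A minimal before/after sketch of the declaration, assuming the same interface as above:

// before (asserts when run twice in a row):
// bool RunNet(std::wstring modelName, std::string inPath1,
//             std::string inPath2, float& iRes);

// after (works):
bool RunNet(std::wstring& modelName, std::string& inPath1,
            std::string& inPath2, float& iRes);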