Background:
Recently a project required some object-detection work in C++, so I experimented with YOLOv5. Deployment turned out to be very convenient: after exporting the YOLOv5 model to ONNX, it can be loaded with OpenCV's cv::dnn::readNetFromONNX, and all that remains is input preprocessing and output parsing.
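For reference, loading such an export with OpenCV takes only a few lines. A minimal sketch (the file names are placeholders; 640x640 input and 1/255 scaling are the usual YOLOv5 defaults, and the raw output still needs decoding plus NMS):

#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    // Hypothetical paths -- substitute your own export and test image.
    cv::dnn::Net net = cv::dnn::readNetFromONNX("yolov5s.onnx");
    cv::Mat img = cv::imread("test.jpg");
    // Pack the image into a 1x3x640x640 float blob, scaled to [0,1], BGR->RGB.
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0 / 255.0, cv::Size(640, 640),
                                          cv::Scalar(), true, false);
    net.setInput(blob);
    cv::Mat out = net.forward();  // raw predictions; decode + NMS still required
    for (int i = 0; i < out.dims; ++i)
        std::cout << out.size[i] << (i + 1 < out.dims ? " x " : "\n");
    return 0;
}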
However, when I took a FasterRCNN model trained under TensorFlow 1.x and successfully converted it to ONNX with tf2onnx, OpenCV could not read it and reported the error below, even though onnxruntime loaded and ran the same model without trouble.
cv2.error: OpenCV(4.5.4) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\onnx\onnx_graph_simplifier.cpp:692: error: (-210:Unsupported format or combination of formats) Unsupported data type: BOOL in function 'cv::dnn::dnn4_v20211004::getMatFromTensor'
Roughly, the exported graph contains a tensor of type BOOL, which OpenCV's ONNX importer cannot convert to a cv::Mat, so loading fails; onnxruntime has no such restriction.
Code:
So I fell back on onnxruntime's C++ API to run the model. Without further ado, here is the code:
// FRCNN.h
#pragma once
#include <iostream>
#include <fstream>
#include <numeric>
#include <opencv2/opencv.hpp>
//#include "../commonStruct.h"
//#include "../BaseShipDetectionModel.h"
#include <onnxruntime_cxx_api.h>

// VNAME was presumably defined in the commented-out headers; a minimal stand-in
// that stringifies a variable name for error messages.
#define VNAME(name) (#name)

class FRCNN {
public:
    FRCNN();
    ~FRCNN();

    // One detection result. (Declared before the methods that use it.)
    struct Output {
        int ClsId;         // class id (background already removed)
        float confidence;  // detection score
        cv::Rect box;      // bounding box in the original image
        Output() : ClsId(), confidence(), box() {}
        Output(int a, float b, cv::Rect c) : ClsId(a), confidence(b), box(c) {}
    };

    bool readModel(std::string &netPath, bool isCuda = false);
    bool DetectShip(cv::Mat &SrcImg, std::vector<Output> &output);
    void drawPredShip(cv::Mat &img, std::vector<Output> &result);

private:
    // Which semantic role an output node plays, keyed off its name.
    enum OutputFlag {
        //NOTHING,
        BOXES,
        SCORES,
        CLSIDS
    };

    Ort::Env *OnnxEnv = nullptr;
    Ort::SessionOptions OnnxSessionOp;
    Ort::Session *OnnxSession = nullptr;
    Ort::AllocatorWithDefaultOptions allocator;
    Ort::MemoryInfo *memory_info = nullptr;
    bool flag_onnx = false;

    cv::Size2f factor;  // scale from network input size back to the source image
    const int netWidth = 1067;
    const int netHeight = 600;
    float nmsThreshold = 0.45;
    float boxThreshold = 0.31;
    float classThreshold = 0.25;

    size_t num_input_nodes, num_output_nodes;
    std::vector<const char*> input_node_names, output_node_names;
    std::vector<OutputFlag> output_node_namesFlag;
    //Ort::Value *input_tensor;
    // HWC with no batch dimension -- the layout this tf2onnx export expects.
    std::vector<int64_t> input_node_dims = { netHeight, netWidth, 3 };
    size_t input_tensor_size = 3 * netHeight * netWidth;

    void parseOnnxOutput(std::vector<Ort::Value> &inputTensors, std::vector<Output> &results);
};
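Note input_node_dims = { netHeight, netWidth, 3 }: this tf2onnx export takes a single HWC image with no batch dimension, unlike the {1, 3, H, W} NCHW layout common in PyTorch exports. If you are unsure what your own model expects, the session can tell you. A minimal sketch, assuming session is an already-constructed Ort::Session:

// Query the declared shape of input 0; entries of -1 denote dynamic dimensions.
Ort::TypeInfo typeInfo = session.GetInputTypeInfo(0);
auto tensorInfo = typeInfo.GetTensorTypeAndShapeInfo();
std::vector<int64_t> dims = tensorInfo.GetShape();
for (size_t k = 0; k < dims.size(); ++k)
    std::cout << dims[k] << (k + 1 < dims.size() ? " x " : "\n");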
// FRCNN.cpp
#include "FRCNN.h"
using namespace std;
using namespace cv;
using namespace dnn;

#if 1
FRCNN::FRCNN() {
    num_input_nodes = 0;
    num_output_nodes = 0;
    OnnxEnv = new Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test");
    memory_info = new Ort::MemoryInfo(Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault));
    if (OnnxEnv == nullptr) {
        std::cout << "new Error for " << VNAME(OnnxEnv) << std::endl;
        throw OnnxEnv;
    }
    if (memory_info == nullptr) {
        std::cout << "new Error for " << VNAME(memory_info) << std::endl;
        throw memory_info;
    }
    OnnxSessionOp.SetIntraOpNumThreads(5);
    OnnxSessionOp.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
    flag_onnx = true;
}

FRCNN::~FRCNN() {
    if (OnnxEnv != nullptr) delete OnnxEnv;
    if (OnnxSession != nullptr) delete OnnxSession;
    if (memory_info != nullptr) delete memory_info;
}

bool FRCNN::readModel(string &netPath, bool isCuda) {
    try {
        std::ifstream f(netPath.c_str());
        std::cout << f.good() << std::endl;  // sanity-check that the file exists
        // The Windows build of onnxruntime takes a wide-character path.
        std::wstring wnetPath = std::wstring(netPath.begin(), netPath.end());
        OnnxSession = new Ort::Session(*OnnxEnv, wnetPath.c_str(), OnnxSessionOp);
        if (OnnxSession == nullptr) {
            std::cout << "new Error for " << VNAME(OnnxSession) << std::endl;
            throw OnnxSession;
        }
        // Print model input/output layers (node names, types, shape, etc.).
        num_input_nodes = OnnxSession->GetInputCount();
        num_output_nodes = OnnxSession->GetOutputCount();
        for (size_t i = 0; i < num_input_nodes; ++i) {
            input_node_names.push_back(OnnxSession->GetInputName(i, allocator));  // e.g. { "image:0" }
            std::cout << input_node_names[i] << std::endl;
        }
        for (size_t i = 0; i < num_output_nodes; ++i) {
            char* name = OnnxSession->GetOutputName(i, allocator);
            output_node_names.push_back(name);
            // Map each output to its role by name,
            // e.g. { "output/boxes:0", "output/scores:0", "output/labels:0" }.
            if (strstr(name, "boxes") != nullptr) {
                output_node_namesFlag.push_back(BOXES);
            }
            else if (strstr(name, "scores") != nullptr) {
                output_node_namesFlag.push_back(SCORES);
            }
            else if (strstr(name, "labels") != nullptr) {
                output_node_namesFlag.push_back(CLSIDS);
            }
            else {
                //output_node_namesFlag.push_back(NOTHING);
                throw(name);
            }
            std::cout << output_node_names[i] << std::endl;
        }
    }
    catch (const std::exception&) {
        return false;
    }
    catch (...) {  // the raw throws above (char*, pointers) are not std::exceptions
        return false;
    }
    return true;
}

bool FRCNN::DetectShip(cv::Mat &SrcImg, std::vector<Output> &results) {
    if (SrcImg.empty()) {
        std::cout << "empty image error!" << std::endl;
        return false;
    }
    results.clear();
    Mat netInputImg, Img;
    // Resize to the fixed network input size and remember the scale factors
    // needed to map the predicted boxes back onto the source image.
    cv::resize(SrcImg, Img, cv::Size(netWidth, netHeight), 0.0, 0.0, cv::INTER_LINEAR);
    factor = cv::Size2f((float)SrcImg.cols / netWidth, (float)SrcImg.rows / netHeight);
    try {
        netInputImg.create(cv::Size(netWidth, netHeight), CV_32FC3);  // allocate a continuous Mat
        Img.convertTo(netInputImg, CV_32F);
        assert(netInputImg.isContinuous());  // CreateTensor wraps the buffer without copying
        Ort::Value input_tensor = Ort::Value::CreateTensor<float>(*memory_info,
            (float*)netInputImg.data, input_tensor_size, input_node_dims.data(), 3);
        assert(input_tensor.IsTensor());
        std::vector<Ort::Value> ort_inputs;
        ort_inputs.push_back(std::move(input_tensor));
        // Run the detection.
        std::vector<Ort::Value> output_tensors = OnnxSession->Run(Ort::RunOptions{ nullptr },
            input_node_names.data(), ort_inputs.data(), ort_inputs.size(),
            output_node_names.data(), 3);
        parseOnnxOutput(output_tensors, results);
    }
    catch (...) {
        std::cout << "prediction error!" << std::endl;
        return false;
    }
    return !results.empty();
}

void FRCNN::drawPredShip(cv::Mat &img, std::vector<Output> &result) {
    int left, top, baseLine;
    for (int i = 0; i < result.size(); i++) {
        left = result[i].box.x;
        top = result[i].box.y;
        rectangle(img, result[i].box, cv::Scalar(0, 0, 255), 2, 8);
        std::string label = std::to_string(result[i].ClsId) + ":" + std::to_string(result[i].confidence);
        cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
        top = std::max(top, labelSize.height);
        putText(img, label, cv::Point(left, top), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 0, 255), 2);
    }
}

void FRCNN::parseOnnxOutput(std::vector<Ort::Value> &inputTensors, std::vector<Output> &results) {
    std::vector<int64_t> classIds;
    std::vector<float> confidences;
    std::vector<cv::Rect> boxes;
    int i, j;
    std::vector<int64_t> shape;
    size_t eleCount;
    int xmin, xmax, ymin, ymax;
    for (i = 0; i < num_output_nodes; ++i) {
        Ort::TensorTypeAndShapeInfo Info = inputTensors[i].GetTensorTypeAndShapeInfo();
        shape = Info.GetShape();
        eleCount = Info.GetElementCount();
        ONNXTensorElementDataType onnxType = Info.GetElementType();
        void* ptr = nullptr;
        switch (onnxType) {
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:  // maps to C type float
            ptr = inputTensors[i].GetTensorMutableData<float>();
            break;
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:  // maps to C type int64_t
            ptr = inputTensors[i].GetTensorMutableData<int64_t>();
            break;
        default:
            throw("Unknown DataType!");
            break;
        }
        // Expected outputs for N detections, e.g. N = 11:
        //   boxes : 44 floats, flattened (11, 4)
        //   scores: (11) floats
        //   labels: (11) int64
        switch (output_node_namesFlag[i]) {
        case BOXES:  // xmin, ymin, xmax, ymax in network-input coordinates
        {
            float* p_boxes = (float*)ptr;
            for (j = 0; j < eleCount; j += 4) {
                xmin = p_boxes[j] * factor.width;
                ymin = p_boxes[j + 1] * factor.height;
                xmax = p_boxes[j + 2] * factor.width;
                ymax = p_boxes[j + 3] * factor.height;
                boxes.push_back(cv::Rect(xmin, ymin, xmax - xmin, ymax - ymin));
            }
            break;
        }
        case SCORES:
        {
            float* p_scores = (float*)ptr;
            for (j = 0; j < eleCount; j++)
                confidences.push_back(p_scores[j]);
            break;
        }
        case CLSIDS:
        {
            int64_t* p_clsids = (int64_t*)ptr;
            for (j = 0; j < eleCount; j++)
                classIds.push_back(p_clsids[j]);
            break;
        }
        }
    }
    Output result;
    assert((boxes.size() == classIds.size()) && (boxes.size() == confidences.size()));
    for (i = 0; i < boxes.size(); ++i) {
        if (confidences[i] > boxThreshold) {
            result.ClsId = classIds[i] - 1;  // shift by 1 to drop the background class
            result.confidence = confidences[i];
            result.box = boxes[i];
            results.push_back(result);
        }
    }
}
#endif
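One caveat if you build against a newer onnxruntime: GetInputName/GetOutputName as used in readModel above were deprecated (around version 1.13) in favor of allocated variants that manage the string's lifetime. A sketch of the replacement, assuming the same session and allocator:

// The smart pointer frees the name when it goes out of scope, so keep a
// std::string copy if the name must outlive it.
Ort::AllocatedStringPtr inName = OnnxSession->GetInputNameAllocated(0, allocator);
std::string inputName(inName.get());
std::cout << inputName << std::endl;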
Call order:
1. readModel // load the model
2. DetectShip // run detection (rename to suit your task)
3. drawPredShip // draw the results (rename to suit your task)
A minimal driver chaining the three is sketched below.
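For illustration, a hypothetical driver (the model and image paths are placeholders, not files shipped with this post):

// main.cpp -- minimal usage of the FRCNN class above.
#include "FRCNN.h"

int main() {
    FRCNN detector;
    std::string modelPath = "fasterRCNN.onnx";  // hypothetical path
    if (!detector.readModel(modelPath)) {
        std::cout << "failed to load model" << std::endl;
        return -1;
    }
    cv::Mat img = cv::imread("ship.jpg");  // hypothetical test image
    std::vector<FRCNN::Output> results;
    if (detector.DetectShip(img, results)) {
        detector.drawPredShip(img, results);
        cv::imwrite("result.jpg", img);
    }
    return 0;
}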
Download link for the FasterRCNN model this code targets: fasterRCNN.model-深度学习文档类资源-CSDN文库
Postscript:
This post is just a working note on calling onnxruntime from C++, especially the input preparation and output parsing. Questions and corrections are welcome!