inference.cpp 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375
#include "inference.h"

#include <cstring>
#include <regex>
#include <string>
  3. #define benchmark
  4. #define min(a,b) (((a) < (b)) ? (a) : (b))
// Default constructor: all real initialization (thresholds, ORT session,
// node names) happens later in CreateSession().
YOLO_V8::YOLO_V8() {
}
// Releases the ONNX Runtime session allocated in CreateSession().
// NOTE(review): deleting is only safe if `session` is nullptr when
// CreateSession() was never called — confirm the header initializes it.
// NOTE(review): the heap-copied node-name strings pushed into
// inputNodeNames/outputNodeNames are not freed here.
YOLO_V8::~YOLO_V8() {
delete session;
}
#ifdef USE_CUDA
// Map the CUDA `half` type onto ONNX Runtime's FLOAT16 element type so that
// Ort::Value::CreateTensor<half>(...) compiles for the FP16 model paths.
namespace Ort
{
template<>
struct TypeToTensorType<half> { static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16; };
}
#endif
  17. template<typename T>
  18. char* BlobFromImage(cv::Mat& iImg, T& iBlob) {
  19. int channels = iImg.channels();
  20. int imgHeight = iImg.rows;
  21. int imgWidth = iImg.cols;
  22. for (int c = 0; c < channels; c++)
  23. {
  24. for (int h = 0; h < imgHeight; h++)
  25. {
  26. for (int w = 0; w < imgWidth; w++)
  27. {
  28. iBlob[c * imgWidth * imgHeight + h * imgWidth + w] = typename std::remove_pointer<T>::type(
  29. (iImg.at<cv::Vec3b>(h, w)[c]) / 255.0f);
  30. }
  31. }
  32. }
  33. return RET_OK;
  34. }
  35. char* YOLO_V8::PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg)
  36. {
  37. if (iImg.channels() == 3)
  38. {
  39. oImg = iImg.clone();
  40. cv::cvtColor(oImg, oImg, cv::COLOR_BGR2RGB);
  41. }
  42. else
  43. {
  44. cv::cvtColor(iImg, oImg, cv::COLOR_GRAY2RGB);
  45. }
  46. switch (modelType)
  47. {
  48. case YOLO_DETECT_V8:
  49. case YOLO_POSE:
  50. case YOLO_DETECT_V8_HALF:
  51. case YOLO_POSE_V8_HALF://LetterBox
  52. {
  53. if (iImg.cols >= iImg.rows)
  54. {
  55. resizeScales = iImg.cols / (float)iImgSize.at(0);
  56. cv::resize(oImg, oImg, cv::Size(iImgSize.at(0), int(iImg.rows / resizeScales)));
  57. }
  58. else
  59. {
  60. resizeScales = iImg.rows / (float)iImgSize.at(0);
  61. cv::resize(oImg, oImg, cv::Size(int(iImg.cols / resizeScales), iImgSize.at(1)));
  62. }
  63. cv::Mat tempImg = cv::Mat::zeros(iImgSize.at(0), iImgSize.at(1), CV_8UC3);
  64. oImg.copyTo(tempImg(cv::Rect(0, 0, oImg.cols, oImg.rows)));
  65. oImg = tempImg;
  66. break;
  67. }
  68. case YOLO_CLS://CenterCrop
  69. {
  70. int h = iImg.rows;
  71. int w = iImg.cols;
  72. int m = min(h, w);
  73. int top = (h - m) / 2;
  74. int left = (w - m) / 2;
  75. cv::resize(oImg(cv::Rect(left, top, m, m)), oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));
  76. break;
  77. }
  78. }
  79. return RET_OK;
  80. }
  81. char* YOLO_V8::CreateSession(DL_INIT_PARAM& iParams) {
  82. char* Ret = RET_OK;
  83. std::regex pattern("[\u4e00-\u9fa5]");
  84. bool result = std::regex_search(iParams.modelPath, pattern);
  85. if (result)
  86. {
  87. Ret = "[YOLO_V8]:Your model path is error.Change your model path without chinese characters.";
  88. std::cout << Ret << std::endl;
  89. return Ret;
  90. }
  91. try
  92. {
  93. rectConfidenceThreshold = iParams.rectConfidenceThreshold;
  94. iouThreshold = iParams.iouThreshold;
  95. imgSize = iParams.imgSize;
  96. modelType = iParams.modelType;
  97. env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "Yolo");
  98. Ort::SessionOptions sessionOption;
  99. if (iParams.cudaEnable)
  100. {
  101. cudaEnable = iParams.cudaEnable;
  102. OrtCUDAProviderOptions cudaOption;
  103. cudaOption.device_id = 0;
  104. sessionOption.AppendExecutionProvider_CUDA(cudaOption);
  105. }
  106. sessionOption.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
  107. sessionOption.SetIntraOpNumThreads(iParams.intraOpNumThreads);
  108. sessionOption.SetLogSeverityLevel(iParams.logSeverityLevel);
  109. #ifdef _WIN32
  110. int ModelPathSize = MultiByteToWideChar(CP_UTF8, 0, iParams.modelPath.c_str(), static_cast<int>(iParams.modelPath.length()), nullptr, 0);
  111. wchar_t* wide_cstr = new wchar_t[ModelPathSize + 1];
  112. MultiByteToWideChar(CP_UTF8, 0, iParams.modelPath.c_str(), static_cast<int>(iParams.modelPath.length()), wide_cstr, ModelPathSize);
  113. wide_cstr[ModelPathSize] = L'\0';
  114. const wchar_t* modelPath = wide_cstr;
  115. #else
  116. const char* modelPath = iParams.modelPath.c_str();
  117. #endif // _WIN32
  118. session = new Ort::Session(env, modelPath, sessionOption);
  119. Ort::AllocatorWithDefaultOptions allocator;
  120. size_t inputNodesNum = session->GetInputCount();
  121. for (size_t i = 0; i < inputNodesNum; i++)
  122. {
  123. Ort::AllocatedStringPtr input_node_name = session->GetInputNameAllocated(i, allocator);
  124. char* temp_buf = new char[50];
  125. strcpy(temp_buf, input_node_name.get());
  126. inputNodeNames.push_back(temp_buf);
  127. }
  128. size_t OutputNodesNum = session->GetOutputCount();
  129. for (size_t i = 0; i < OutputNodesNum; i++)
  130. {
  131. Ort::AllocatedStringPtr output_node_name = session->GetOutputNameAllocated(i, allocator);
  132. char* temp_buf = new char[10];
  133. strcpy(temp_buf, output_node_name.get());
  134. outputNodeNames.push_back(temp_buf);
  135. }
  136. options = Ort::RunOptions{ nullptr };
  137. WarmUpSession();
  138. return RET_OK;
  139. }
  140. catch (const std::exception& e)
  141. {
  142. const char* str1 = "[YOLO_V8]:";
  143. const char* str2 = e.what();
  144. std::string result = std::string(str1) + std::string(str2);
  145. char* merged = new char[result.length() + 1];
  146. std::strcpy(merged, result.c_str());
  147. std::cout << merged << std::endl;
  148. delete[] merged;
  149. return "[YOLO_V8]:Create session failed.";
  150. }
  151. }
  152. char* YOLO_V8::RunSession(cv::Mat& iImg, std::vector<DL_RESULT>& oResult) {
  153. #ifdef benchmark
  154. clock_t starttime_1 = clock();
  155. #endif // benchmark
  156. char* Ret = RET_OK;
  157. cv::Mat processedImg;
  158. PreProcess(iImg, imgSize, processedImg);
  159. if (modelType < 4)
  160. {
  161. float* blob = new float[processedImg.total() * 3];
  162. BlobFromImage(processedImg, blob);
  163. std::vector<int64_t> inputNodeDims = { 1, 3, imgSize.at(0), imgSize.at(1) };
  164. TensorProcess(starttime_1, iImg, blob, inputNodeDims, oResult);
  165. }
  166. else
  167. {
  168. #ifdef USE_CUDA
  169. half* blob = new half[processedImg.total() * 3];
  170. BlobFromImage(processedImg, blob);
  171. std::vector<int64_t> inputNodeDims = { 1,3,imgSize.at(0),imgSize.at(1) };
  172. TensorProcess(starttime_1, iImg, blob, inputNodeDims, oResult);
  173. #endif
  174. }
  175. return Ret;
  176. }
// Runs the ORT session on a prepared input blob and decodes the output into
// DL_RESULT entries appended to oResult.
// N is a pointer type (float* for FP32 models, half* for FP16 models); the
// blob is owned by the caller until this function delete[]s it after Run().
// starttime_1 is the RunSession start tick used only for the benchmark print.
// NOTE(review): iImg is currently unused in this function body.
template<typename N>
char* YOLO_V8::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims,
std::vector<DL_RESULT>& oResult) {
// Wrap the caller-owned blob in an ORT tensor view (no copy).
Ort::Value inputTensor = Ort::Value::CreateTensor<typename std::remove_pointer<N>::type>(
Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),
inputNodeDims.data(), inputNodeDims.size());
#ifdef benchmark
clock_t starttime_2 = clock();
#endif // benchmark
auto outputTensor = session->Run(options, inputNodeNames.data(), &inputTensor, 1, outputNodeNames.data(),
outputNodeNames.size());
#ifdef benchmark
clock_t starttime_3 = clock();
#endif // benchmark
Ort::TypeInfo typeInfo = outputTensor.front().GetTypeInfo();
auto tensor_info = typeInfo.GetTensorTypeAndShapeInfo();
std::vector<int64_t> outputNodeDims = tensor_info.GetShape();
auto output = outputTensor.front().GetTensorMutableData<typename std::remove_pointer<N>::type>();
// The input blob is no longer needed once ORT has produced the output.
delete[] blob;
switch (modelType)
{
case YOLO_DETECT_V8:
case YOLO_DETECT_V8_HALF:
{
// Detection head: output shape [1, 4+numClasses, numAnchors].
int signalResultNum = outputNodeDims[1];//84
int strideNum = outputNodeDims[2];//8400
std::vector<int> class_ids;
std::vector<float> confidences;
std::vector<cv::Rect> boxes;
// rawData aliases the ORT output buffer; for FP16 it is converted to FP32
// (convertTo reallocates, so the alias is dropped there).
cv::Mat rawData;
if (modelType == YOLO_DETECT_V8)
{
// FP32
rawData = cv::Mat(signalResultNum, strideNum, CV_32F, output);
}
else
{
// FP16
rawData = cv::Mat(signalResultNum, strideNum, CV_16F, output);
rawData.convertTo(rawData, CV_32F);
}
// Note:
// ultralytics add transpose operator to the output of yolov8 model.which make yolov8/v5/v7 has same shape
// https://github.com/ultralytics/assets/releases/download/v8.3.0/yolov8n.pt
// Transpose to [numAnchors, 4+numClasses] so each row is one candidate.
rawData = rawData.t();
float* data = (float*)rawData.data;
for (int i = 0; i < strideNum; ++i)
{
// Row layout: [cx, cy, w, h, class scores...].
float* classesScores = data + 4;
cv::Mat scores(1, this->classes.size(), CV_32FC1, classesScores);
cv::Point class_id;
double maxClassScore;
cv::minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);
if (maxClassScore > rectConfidenceThreshold)
{
confidences.push_back(maxClassScore);
class_ids.push_back(class_id.x);
float x = data[0];
float y = data[1];
float w = data[2];
float h = data[3];
// Map the center-format box from letterboxed coordinates back to the
// original image using resizeScales (set in PreProcess).
int left = int((x - 0.5 * w) * resizeScales);
int top = int((y - 0.5 * h) * resizeScales);
int width = int(w * resizeScales);
int height = int(h * resizeScales);
boxes.push_back(cv::Rect(left, top, width, height));
}
data += signalResultNum;
}
// Non-maximum suppression keeps the best box among overlapping candidates.
std::vector<int> nmsResult;
cv::dnn::NMSBoxes(boxes, confidences, rectConfidenceThreshold, iouThreshold, nmsResult);
for (int i = 0; i < nmsResult.size(); ++i)
{
int idx = nmsResult[i];
DL_RESULT result;
result.classId = class_ids[idx];
result.confidence = confidences[idx];
result.box = boxes[idx];
oResult.push_back(result);
}
#ifdef benchmark
clock_t starttime_4 = clock();
double pre_process_time = (double)(starttime_2 - starttime_1) / CLOCKS_PER_SEC * 1000;
double process_time = (double)(starttime_3 - starttime_2) / CLOCKS_PER_SEC * 1000;
double post_process_time = (double)(starttime_4 - starttime_3) / CLOCKS_PER_SEC * 1000;
if (cudaEnable)
{
std::cout << "[YOLO_V8(CUDA)]: " << pre_process_time << "ms pre-process, " << process_time << "ms inference, " << post_process_time << "ms post-process." << std::endl;
}
else
{
std::cout << "[YOLO_V8(CPU)]: " << pre_process_time << "ms pre-process, " << process_time << "ms inference, " << post_process_time << "ms post-process." << std::endl;
}
#endif // benchmark
break;
}
case YOLO_CLS:
case YOLO_CLS_HALF:
{
// Classification head: one score per class; every class is appended with
// its confidence (no thresholding or top-k here — caller decides).
cv::Mat rawData;
if (modelType == YOLO_CLS) {
// FP32
rawData = cv::Mat(1, this->classes.size(), CV_32F, output);
} else {
// FP16
rawData = cv::Mat(1, this->classes.size(), CV_16F, output);
rawData.convertTo(rawData, CV_32F);
}
float *data = (float *) rawData.data;
DL_RESULT result;
for (int i = 0; i < this->classes.size(); i++)
{
result.classId = i;
result.confidence = data[i];
oResult.push_back(result);
}
break;
}
default:
std::cout << "[YOLO_V8]: " << "Not support model type." << std::endl;
}
return RET_OK;
}
  300. char* YOLO_V8::WarmUpSession() {
  301. clock_t starttime_1 = clock();
  302. cv::Mat iImg = cv::Mat(cv::Size(imgSize.at(0), imgSize.at(1)), CV_8UC3);
  303. cv::Mat processedImg;
  304. PreProcess(iImg, imgSize, processedImg);
  305. if (modelType < 4)
  306. {
  307. float* blob = new float[iImg.total() * 3];
  308. BlobFromImage(processedImg, blob);
  309. std::vector<int64_t> YOLO_input_node_dims = { 1, 3, imgSize.at(0), imgSize.at(1) };
  310. Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
  311. Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),
  312. YOLO_input_node_dims.data(), YOLO_input_node_dims.size());
  313. auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(),
  314. outputNodeNames.size());
  315. delete[] blob;
  316. clock_t starttime_4 = clock();
  317. double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
  318. if (cudaEnable)
  319. {
  320. std::cout << "[YOLO_V8(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
  321. }
  322. }
  323. else
  324. {
  325. #ifdef USE_CUDA
  326. half* blob = new half[iImg.total() * 3];
  327. BlobFromImage(processedImg, blob);
  328. std::vector<int64_t> YOLO_input_node_dims = { 1,3,imgSize.at(0),imgSize.at(1) };
  329. Ort::Value input_tensor = Ort::Value::CreateTensor<half>(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1), YOLO_input_node_dims.data(), YOLO_input_node_dims.size());
  330. auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(), outputNodeNames.size());
  331. delete[] blob;
  332. clock_t starttime_4 = clock();
  333. double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;
  334. if (cudaEnable)
  335. {
  336. std::cout << "[YOLO_V8(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
  337. }
  338. #endif
  339. }
  340. return RET_OK;
  341. }