// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#include "test_precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include "npy_blob.hpp"

namespace opencv_test { namespace {

template<typename TString>
static std::string _tf(TString filename, bool required = true)
{
    String rootFolder = "dnn/";
    return findDataFile(rootFolder + filename, required);
}
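
// Tests for the high-level dnn Model API (ClassificationModel, DetectionModel,
// KeypointsModel, SegmentationModel, TextRecognitionModel, TextDetectionModel_DB/EAST).
// Each helper below builds a model from weights/config, applies the requested
// preprocessing (input size, mean, scale, channel swap, crop), runs inference on a
// test image and compares the result with reference values within given tolerances.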
class Test_Model : public DNNTestLayer
{
public:
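    // Runs a DetectionModel on an image and compares the detected class ids,
    // confidences and boxes with the reference detections via normAssertDetections.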
    void testDetectModel(const std::string& weights, const std::string& cfg,
                         const std::string& imgPath, const std::vector<int>& refClassIds,
                         const std::vector<float>& refConfidences,
                         const std::vector<Rect2d>& refBoxes,
                         double scoreDiff, double iouDiff,
                         double confThreshold = 0.24, double nmsThreshold = 0.0,
                         const Size& size = {-1, -1}, Scalar mean = Scalar(),
                         double scale = 1.0, bool swapRB = false, bool crop = false,
                         bool nmsAcrossClasses = false)
    {
        checkBackend();
        Mat frame = imread(imgPath);

        DetectionModel model(weights, cfg);
        model.setInputSize(size).setInputMean(mean).setInputScale(scale)
             .setInputSwapRB(swapRB).setInputCrop(crop);
        model.setPreferableBackend(backend);
        model.setPreferableTarget(target);
        model.setNmsAcrossClasses(nmsAcrossClasses);

        std::vector<int> classIds;
        std::vector<float> confidences;
        std::vector<Rect> boxes;
        model.detect(frame, classIds, confidences, boxes, confThreshold, nmsThreshold);

        std::vector<Rect2d> boxesDouble(boxes.size());
        for (int i = 0; i < boxes.size(); i++) {
            boxesDouble[i] = boxes[i];
        }

        normAssertDetections(refClassIds, refConfidences, refBoxes, classIds,
                             confidences, boxesDouble, "",
                             confThreshold, scoreDiff, iouDiff);
    }
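
    // Runs a ClassificationModel and checks the top-1 class id and confidence.
    // Note: unlike the other helpers, this one does not set the preferable
    // backend/target, so it runs with the default backend.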
    void testClassifyModel(const std::string& weights, const std::string& cfg,
                           const std::string& imgPath, std::pair<int, float> ref, float norm,
                           const Size& size = {-1, -1}, Scalar mean = Scalar(),
                           double scale = 1.0, bool swapRB = false, bool crop = false)
    {
        checkBackend();
        Mat frame = imread(imgPath);

        ClassificationModel model(weights, cfg);
        model.setInputSize(size).setInputMean(mean).setInputScale(scale)
             .setInputSwapRB(swapRB).setInputCrop(crop);

        std::pair<int, float> prediction = model.classify(frame);
        EXPECT_EQ(prediction.first, ref.first);
        ASSERT_NEAR(prediction.second, ref.second, norm);
    }
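
    // Runs a KeypointsModel, packs the estimated 2D points into an Nx2 Mat and
    // compares them with the expected keypoints via normAssert.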
    void testKeypointsModel(const std::string& weights, const std::string& cfg,
                            const Mat& frame, const Mat& exp, float norm,
                            const Size& size = {-1, -1}, Scalar mean = Scalar(),
                            double scale = 1.0, bool swapRB = false, bool crop = false)
    {
        checkBackend();
        std::vector<Point2f> points;

        KeypointsModel model(weights, cfg);
        model.setInputSize(size).setInputMean(mean).setInputScale(scale)
             .setInputSwapRB(swapRB).setInputCrop(crop);
        model.setPreferableBackend(backend);
        model.setPreferableTarget(target);

        points = model.estimate(frame, 0.5);

        Mat out = Mat(points).reshape(1);
        normAssert(exp, out, "", norm, norm);
    }
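
    // Runs a SegmentationModel and compares the predicted class-id mask against a
    // reference mask image loaded as grayscale.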
    void testSegmentationModel(const std::string& weights_file, const std::string& config_file,
                               const std::string& inImgPath, const std::string& outImgPath,
                               float norm, const Size& size = {-1, -1}, Scalar mean = Scalar(),
                               double scale = 1.0, bool swapRB = false, bool crop = false)
    {
        checkBackend();
        Mat frame = imread(inImgPath);
        Mat mask;
        Mat exp = imread(outImgPath, 0);

        SegmentationModel model(weights_file, config_file);
        model.setInputSize(size).setInputMean(mean).setInputScale(scale)
             .setInputSwapRB(swapRB).setInputCrop(crop);
        model.setPreferableBackend(backend);
        model.setPreferableTarget(target);

        model.segment(frame, mask);
        normAssert(mask, exp, "", norm, norm);
    }
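
    // Runs a TextRecognitionModel on a grayscale image, first on the full frame and
    // then on two identical ROIs, expecting the decoded string to match in every case.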
    void testTextRecognitionModel(const std::string& weights, const std::string& cfg,
                                  const std::string& imgPath, const std::string& seq,
                                  const std::string& decodeType, const std::vector<std::string>& vocabulary,
                                  const Size& size = {-1, -1}, Scalar mean = Scalar(),
                                  double scale = 1.0, bool swapRB = false, bool crop = false)
    {
        checkBackend();
        Mat frame = imread(imgPath, IMREAD_GRAYSCALE);

        TextRecognitionModel model(weights, cfg);
        model.setDecodeType(decodeType)
             .setVocabulary(vocabulary)
             .setInputSize(size).setInputMean(mean).setInputScale(scale)
             .setInputSwapRB(swapRB).setInputCrop(crop);
        model.setPreferableBackend(backend);
        model.setPreferableTarget(target);

        std::string result = model.recognize(frame);
        EXPECT_EQ(result, seq) << "Full frame: " << imgPath;

        std::vector<Rect> rois;
        rois.push_back(Rect(0, 0, frame.cols, frame.rows));
        rois.push_back(Rect(0, 0, frame.cols, frame.rows));  // twice
        std::vector<std::string> results;
        model.recognize(frame, rois, results);
        EXPECT_EQ((size_t)2u, results.size()) << "ROI: " << imgPath;
        EXPECT_EQ(results[0], seq) << "ROI[0]: " << imgPath;
        EXPECT_EQ(results[1], seq) << "ROI[1]: " << imgPath;
    }
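
    // Runs a TextDetectionModel_DB and checks the detected text regions against the
    // ground-truth quadrangles, once through detectTextRectangles() (RotatedRect API)
    // and once through detect() (quadrangle API).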
    void testTextDetectionModelByDB(const std::string& weights, const std::string& cfg,
                                    const std::string& imgPath, const std::vector<std::vector<Point>>& gt,
                                    float binThresh, float polyThresh,
                                    uint maxCandidates, double unclipRatio,
                                    const Size& size = {-1, -1}, Scalar mean = Scalar(), Scalar scale = Scalar::all(1.0),
                                    double boxes_iou_diff = 0.05, bool swapRB = false, bool crop = false)
    {
        checkBackend();
        Mat frame = imread(imgPath);

        TextDetectionModel_DB model(weights, cfg);
        model.setBinaryThreshold(binThresh)
             .setPolygonThreshold(polyThresh)
             .setUnclipRatio(unclipRatio)
             .setMaxCandidates(maxCandidates)
             .setInputSize(size).setInputMean(mean).setInputScale(scale)
             .setInputSwapRB(swapRB).setInputCrop(crop);
        model.setPreferableBackend(backend);
        model.setPreferableTarget(target);

        // 1. Check common TextDetectionModel API through RotatedRect
        std::vector<cv::RotatedRect> results;
        model.detectTextRectangles(frame, results);
        EXPECT_GT(results.size(), (size_t)0);

        std::vector< std::vector<Point> > contours;
        for (size_t i = 0; i < results.size(); i++)
        {
            const RotatedRect& box = results[i];
            Mat contour;
            boxPoints(box, contour);
            std::vector<Point> contour2i(4);
            for (int i = 0; i < 4; i++)
            {
                contour2i[i].x = cvRound(contour.at<float>(i, 0));
                contour2i[i].y = cvRound(contour.at<float>(i, 1));
            }
            contours.push_back(contour2i);
        }
#if 0 // test debug
        Mat result = frame.clone();
        drawContours(result, contours, -1, Scalar(0, 0, 255), 1);
        imshow("result", result); // imwrite("result.png", result);
        waitKey(0);
#endif
        normAssertTextDetections(gt, contours, "", boxes_iou_diff);

        // 2. Check quadrangle-based API
        // std::vector< std::vector<Point> > contours;
        model.detect(frame, contours);
#if 0 // test debug
        Mat result = frame.clone();
        drawContours(result, contours, -1, Scalar(0, 0, 255), 1);
        imshow("result_contours", result); // imwrite("result_contours.png", result);
        waitKey(0);
#endif
        normAssertTextDetections(gt, contours, "", boxes_iou_diff);
    }
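
    // Runs a TextDetectionModel_EAST, expects exactly one detected rotated box and
    // compares it with the ground truth using separate tolerances for center, size
    // and angle.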
    void testTextDetectionModelByEAST(
            const std::string& weights, const std::string& cfg,
            const std::string& imgPath, const std::vector<RotatedRect>& gt,
            float confThresh, float nmsThresh,
            const Size& size = {-1, -1}, Scalar mean = Scalar(),
            double scale = 1.0, bool swapRB = false, bool crop = false,
            double eps_center = 5/*pixels*/, double eps_size = 5/*pixels*/, double eps_angle = 1
    )
    {
        checkBackend();
        Mat frame = imread(imgPath);

        TextDetectionModel_EAST model(weights, cfg);
        model.setConfidenceThreshold(confThresh)
             .setNMSThreshold(nmsThresh)
             .setInputSize(size).setInputMean(mean).setInputScale(scale)
             .setInputSwapRB(swapRB).setInputCrop(crop);
        model.setPreferableBackend(backend);
        model.setPreferableTarget(target);

        std::vector<cv::RotatedRect> results;
        model.detectTextRectangles(frame, results);

        EXPECT_EQ(results.size(), (size_t)1);
        for (size_t i = 0; i < results.size(); i++)
        {
            const RotatedRect& box = results[i];
#if 0 // test debug
            Mat contour;
            boxPoints(box, contour);
            std::vector<Point> contour2i(4);
            for (int i = 0; i < 4; i++)
            {
                contour2i[i].x = cvRound(contour.at<float>(i, 0));
                contour2i[i].y = cvRound(contour.at<float>(i, 1));
            }
            std::vector< std::vector<Point> > contours;
            contours.push_back(contour2i);

            Mat result = frame.clone();
            drawContours(result, contours, -1, Scalar(0, 0, 255), 1);
            imshow("result", result); //imwrite("result.png", result);
            waitKey(0);
#endif
            const RotatedRect& gtBox = gt[i];
            EXPECT_NEAR(box.center.x, gtBox.center.x, eps_center);
            EXPECT_NEAR(box.center.y, gtBox.center.y, eps_center);
            EXPECT_NEAR(box.size.width, gtBox.size.width, eps_size);
            EXPECT_NEAR(box.size.height, gtBox.size.height, eps_size);
            EXPECT_NEAR(box.angle, gtBox.angle, eps_angle);
        }
    }
};
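
// The parameterized cases below exercise the helpers with concrete pretrained models
// from the dnn test data set (files requested with required=false are typically
// provided by an extra download). Per-target tolerances and skip tags account for
// FP16/Myriad accuracy differences and known Inference Engine issues.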
TEST_P(Test_Model, Classify)
{
    std::pair<int, float> ref(652, 0.641789);

    std::string img_path = _tf("grace_hopper_227.png");
    std::string config_file = _tf("bvlc_alexnet.prototxt");
    std::string weights_file = _tf("bvlc_alexnet.caffemodel", false);

    Size size{227, 227};
    float norm = 1e-4;

    testClassifyModel(weights_file, config_file, img_path, ref, norm, size);
}

TEST_P(Test_Model, DetectRegion)
{
    applyTestTag(
        CV_TEST_TAG_LONG,
        CV_TEST_TAG_MEMORY_2GB
    );

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    // accuracy
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // accuracy
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)  // nGraph compilation failure
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    // FIXIT DNN_BACKEND_INFERENCE_ENGINE is misused
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif

#if defined(INF_ENGINE_RELEASE)
    if (target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    std::vector<int> refClassIds = {6, 1, 11};
    std::vector<float> refConfidences = {0.750469f, 0.780879f, 0.901615f};
    std::vector<Rect2d> refBoxes = {Rect2d(240, 53, 135, 72),
                                    Rect2d(112, 109, 192, 200),
                                    Rect2d(58, 141, 117, 249)};

    std::string img_path = _tf("dog416.png");
    std::string weights_file = _tf("yolo-voc.weights", false);
    std::string config_file = _tf("yolo-voc.cfg");

    double scale = 1.0 / 255.0;
    Size size{416, 416};
    bool swapRB = true;

    double confThreshold = 0.24;
    double nmsThreshold = (target == DNN_TARGET_MYRIAD) ? 0.397 : 0.4;
    double scoreDiff = 8e-5, iouDiff = 1e-5;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_CPU_FP16)
    {
        scoreDiff = 1e-2;
        iouDiff = 1.6e-2;
    }

    testDetectModel(weights_file, config_file, img_path, refClassIds, refConfidences,
                    refBoxes, scoreDiff, iouDiff, confThreshold, nmsThreshold, size,
                    Scalar(), scale, swapRB);
}

TEST_P(Test_Model, DetectRegionWithNmsAcrossClasses)
{
    applyTestTag(
        CV_TEST_TAG_LONG,
        CV_TEST_TAG_MEMORY_2GB
    );

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    // accuracy
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // accuracy
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)  // nGraph compilation failure
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif

#if defined(INF_ENGINE_RELEASE)
    if (target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    std::vector<int> refClassIds = { 6, 11 };
    std::vector<float> refConfidences = { 0.750469f, 0.901615f };
    std::vector<Rect2d> refBoxes = { Rect2d(240, 53, 135, 72),
                                     Rect2d(58, 141, 117, 249) };

    std::string img_path = _tf("dog416.png");
    std::string weights_file = _tf("yolo-voc.weights", false);
    std::string config_file = _tf("yolo-voc.cfg");

    double scale = 1.0 / 255.0;
    Size size{ 416, 416 };
    bool swapRB = true;
    bool crop = false;
    bool nmsAcrossClasses = true;

    double confThreshold = 0.24;
    double nmsThreshold = 0.15;
    double scoreDiff = 8e-5, iouDiff = 1e-5;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_CPU_FP16)
    {
        scoreDiff = 1e-2;
        iouDiff = 1.6e-2;
    }

    testDetectModel(weights_file, config_file, img_path, refClassIds, refConfidences,
                    refBoxes, scoreDiff, iouDiff, confThreshold, nmsThreshold, size,
                    Scalar(), scale, swapRB, crop,
                    nmsAcrossClasses);
}

TEST_P(Test_Model, DetectionOutput)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    // Check 'backward_compatible_check || in_out_elements_equal' failed at core/src/op/reshape.cpp:427:
    // While validating node 'v1::Reshape bbox_pred_reshape (ave_bbox_pred_rois[0]:f32{1,8,1,1}, Constant_388[0]:i64{4}) -> (f32{?,?,?,?})' with friendly_name 'bbox_pred_reshape':
    // Requested output shape {1,300,8,1} is incompatible with input shape {1, 8, 1, 1}
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // Exception: Function contains several inputs and outputs with one friendly name! (HETERO bug?)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE)
    // FIXIT DNN_BACKEND_INFERENCE_ENGINE is misused
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
    if (target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif

    std::vector<int> refClassIds = {7, 12};
    std::vector<float> refConfidences = {0.991359f, 0.94786f};
    std::vector<Rect2d> refBoxes = {Rect2d(491, 81, 212, 98),
                                    Rect2d(132, 223, 207, 344)};

    std::string img_path = _tf("dog416.png");
    std::string weights_file = _tf("resnet50_rfcn_final.caffemodel", false);
    std::string config_file = _tf("rfcn_pascal_voc_resnet50.prototxt");

    Scalar mean = Scalar(102.9801, 115.9465, 122.7717);
    Size size{800, 600};

    double scoreDiff = default_l1, iouDiff = 1e-5;
    float confThreshold = 0.8;
    double nmsThreshold = 0.0;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_CPU_FP16)
    {
        if (backend == DNN_BACKEND_OPENCV)
            scoreDiff = 4e-3;
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2022010000)
        else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            scoreDiff = 4e-2;
#endif
        else
            scoreDiff = 2e-2;
        iouDiff = 1.8e-1;
    }

    testDetectModel(weights_file, config_file, img_path, refClassIds, refConfidences, refBoxes,
                    scoreDiff, iouDiff, confThreshold, nmsThreshold, size, mean);
}

TEST_P(Test_Model, DetectionMobilenetSSD)
{
    Mat ref = blobFromNPY(_tf("mobilenet_ssd_caffe_out.npy"));
    ref = ref.reshape(1, ref.size[2]);

    std::string img_path = _tf("street.png");
    Mat frame = imread(img_path);
    int frameWidth = frame.cols;
    int frameHeight = frame.rows;

    std::vector<int> refClassIds;
    std::vector<float> refConfidences;
    std::vector<Rect2d> refBoxes;
    for (int i = 0; i < ref.rows; i++)
    {
        refClassIds.emplace_back(ref.at<float>(i, 1));
        refConfidences.emplace_back(ref.at<float>(i, 2));
        int left = ref.at<float>(i, 3) * frameWidth;
        int top = ref.at<float>(i, 4) * frameHeight;
        int right = ref.at<float>(i, 5) * frameWidth;
        int bottom = ref.at<float>(i, 6) * frameHeight;
        int width = right - left + 1;
        int height = bottom - top + 1;
        refBoxes.emplace_back(left, top, width, height);
    }

    std::string weights_file = _tf("MobileNetSSD_deploy.caffemodel", false);
    std::string config_file = _tf("MobileNetSSD_deploy.prototxt");

    Scalar mean = Scalar(127.5, 127.5, 127.5);
    double scale = 1.0 / 127.5;
    Size size{300, 300};

    double scoreDiff = 1e-5, iouDiff = 1e-5;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_CPU_FP16)
    {
        scoreDiff = 1.7e-2;
        iouDiff = 6.91e-2;
    }
    else if (target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.017;
        if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
            iouDiff = 0.1;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        scoreDiff = 0.0021;
        iouDiff = 1e-2;
    }
    float confThreshold = FLT_MIN;
    double nmsThreshold = 0.0;

    testDetectModel(weights_file, config_file, img_path, refClassIds, refConfidences, refBoxes,
                    scoreDiff, iouDiff, confThreshold, nmsThreshold, size, mean, scale);
}

TEST_P(Test_Model, Keypoints_pose)
{
    if (target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (target == DNN_TARGET_CPU_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CPU_FP16);
#ifdef HAVE_INF_ENGINE
    if (target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    Mat inp = imread(_tf("pose.png"));
    std::string weights = _tf("onnx/models/lightweight_pose_estimation_201912.onnx", false);
    float kpdata[] = {
        237.65625f, 78.25f, 237.65625f, 136.9375f,
        190.125f, 136.9375f, 142.59375f, 195.625f, 79.21875f, 176.0625f, 285.1875f, 117.375f,
        348.5625f, 195.625f, 396.09375f, 176.0625f, 205.96875f, 313.0f, 205.96875f, 430.375f,
        205.96875f, 528.1875f, 269.34375f, 293.4375f, 253.5f, 430.375f, 237.65625f, 528.1875f,
        221.8125f, 58.6875f, 253.5f, 58.6875f, 205.96875f, 78.25f, 253.5f, 58.6875f
    };
    Mat exp(18, 2, CV_32FC1, kpdata);

    Size size{256, 256};
    float norm = 1e-4;
    double scale = 1.0/255;
    Scalar mean = Scalar(128, 128, 128);
    bool swapRB = false;

    // Ref. Range: [58.6875, 508.625]
    if (target == DNN_TARGET_CUDA_FP16)
        norm = 20;  // l1 = 1.5, lInf = 20

    testKeypointsModel(weights, "", inp, exp, norm, size, mean, scale, swapRB);
}

TEST_P(Test_Model, Keypoints_face)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif

    Mat inp = imread(_tf("gray_face.png"), 0);
    std::string weights = _tf("onnx/models/facial_keypoints.onnx", false);
    Mat exp = blobFromNPY(_tf("facial_keypoints_exp.npy"));

    Size size{224, 224};
    double scale = 1.0/255;
    Scalar mean = Scalar();
    bool swapRB = false;

    // Ref. Range: [-1.1784188, 1.7758257]
    float norm = 1e-4;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_CPU_FP16)
        norm = 5e-3;
    if (target == DNN_TARGET_MYRIAD)
    {
        // Myriad2: l1 = 0.0004, lInf = 0.002
        // MyriadX: l1 = 0.003, lInf = 0.009
        norm = 0.009;
    }
    if (target == DNN_TARGET_CUDA_FP16)
        norm = 0.004;  // l1 = 0.0006, lInf = 0.004

    testKeypointsModel(weights, "", inp, exp, norm, size, mean, scale, swapRB);
}

TEST_P(Test_Model, Detection_normalized)
{
    std::string img_path = _tf("grace_hopper_227.png");
    std::vector<int> refClassIds = {15};
    std::vector<float> refConfidences = {0.999222f};
    std::vector<Rect2d> refBoxes = {Rect2d(0, 4, 227, 222)};

    std::string weights_file = _tf("MobileNetSSD_deploy.caffemodel", false);
    std::string config_file = _tf("MobileNetSSD_deploy.prototxt");

    Scalar mean = Scalar(127.5, 127.5, 127.5);
    double scale = 1.0 / 127.5;
    Size size{300, 300};

    double scoreDiff = 1e-5, iouDiff = 1e-5;
    float confThreshold = FLT_MIN;
    double nmsThreshold = 0.0;
    if (target == DNN_TARGET_CUDA)
    {
        scoreDiff = 3e-4;
        iouDiff = 0.018;
    }
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_CPU_FP16)
    {
        scoreDiff = 5e-3;
        iouDiff = 0.09;
    }
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 0.02;
        iouDiff = 0.1f;
    }
#endif

    testDetectModel(weights_file, config_file, img_path, refClassIds, refConfidences, refBoxes,
                    scoreDiff, iouDiff, confThreshold, nmsThreshold, size, mean, scale);
}

TEST_P(Test_Model, Segmentation)
{
    applyTestTag(
        CV_TEST_TAG_MEMORY_2GB
    );

    float norm = 0;
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    // Failed to allocate graph: NC_ERROR
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    // accuracy
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
    {
        norm = 25.0f;  // depends on OS/OpenCL version
    }
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // Failed to allocate graph: NC_ERROR
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    // cnn_network_ngraph_impl.cpp:104 Function contains several inputs and outputs with one friendly name: 'upscore2'!
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    // cnn_network_ngraph_impl.cpp:104 Function contains several inputs and outputs with one friendly name: 'upscore2'!
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#elif defined(INF_ENGINE_RELEASE)
    // Failed to allocate graph: NC_ERROR
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if ((backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_CPU_FP16))
        || (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16))
    {
        norm = 2.0f;  // l1 = 0.01, lInf = 2
    }

    std::string inp = _tf("dog416.png");
    std::string weights_file = _tf("fcn8s-heavy-pascal.prototxt");
    std::string config_file = _tf("fcn8s-heavy-pascal.caffemodel", false);
    std::string exp = _tf("segmentation_exp.png");

    Size size{128, 128};
    double scale = 1.0;
    Scalar mean = Scalar();
    bool swapRB = false;

    testSegmentationModel(weights_file, config_file, inp, exp, norm, size, mean, scale, swapRB);
}

TEST_P(Test_Model, TextRecognition)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    // FIXIT: dnn/src/ie_ngraph.cpp:494: error: (-215:Assertion failed) !inps.empty() in function 'createNet'
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    // Node Transpose_79 was not assigned on any pointed device
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // IE Exception: Ngraph operation Reshape with name 71 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
#endif

    std::string imgPath = _tf("text_rec_test.png");
    std::string weightPath = _tf("onnx/models/crnn.onnx", false);
    std::string seq = "welcome";

    Size size{100, 32};
    double scale = 1.0 / 127.5;
    Scalar mean = Scalar(127.5);
    std::string decodeType = "CTC-greedy";
    std::vector<std::string> vocabulary = {"0","1","2","3","4","5","6","7","8","9",
                                           "a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};

    testTextRecognitionModel(weightPath, "", imgPath, seq, decodeType, vocabulary, size, mean, scale);
}

TEST_P(Test_Model, TextRecognitionWithCTCPrefixBeamSearch)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
    // Node Transpose_79 was not assigned on any pointed device
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
#elif defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
    // IE Exception: Ngraph operation Reshape with name 71 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
        );
#endif

    std::string imgPath = _tf("text_rec_test.png");
    std::string weightPath = _tf("onnx/models/crnn.onnx", false);
    std::string seq = "welcome";

    Size size{100, 32};
    double scale = 1.0 / 127.5;
    Scalar mean = Scalar(127.5);
    std::string decodeType = "CTC-prefix-beam-search";
    std::vector<std::string> vocabulary = {"0","1","2","3","4","5","6","7","8","9",
                                           "a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};

    testTextRecognitionModel(weightPath, "", imgPath, seq, decodeType, vocabulary, size, mean, scale);
}

TEST_P(Test_Model, TextDetectionByDB)
{
    if (target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (target == DNN_TARGET_CPU_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CPU_FP16);

    std::string imgPath = _tf("text_det_test1.png");
    std::string weightPathDB = _tf("onnx/models/DB_TD500_resnet50.onnx", false);
    std::string weightPathPPDB = _tf("onnx/models/PP_OCRv3_DB_text_det.onnx", false);

    // GroundTruth
    std::vector<std::vector<Point>> gt = {
        { Point(142, 193), Point(136, 164), Point(213, 150), Point(219, 178) },
        { Point(136, 165), Point(122, 114), Point(319, 71), Point(330, 122) }
    };

    Size size{736, 736};
    Scalar scaleDB = Scalar::all(1.0 / 255.0);
    Scalar meanDB = Scalar(122.67891434, 116.66876762, 104.00698793);

    // new mean and stddev
    Scalar meanPPDB = Scalar(123.675, 116.28, 103.53);
    Scalar stddevPPDB = Scalar(0.229, 0.224, 0.225);
    Scalar scalePPDB = scaleDB / stddevPPDB;

    float binThresh = 0.3;
    float polyThresh = 0.5;
    uint maxCandidates = 200;
    double unclipRatio = 2.0;

    {
        SCOPED_TRACE("Original DB");
        testTextDetectionModelByDB(weightPathDB, "", imgPath, gt, binThresh, polyThresh, maxCandidates, unclipRatio, size, meanDB, scaleDB, 0.05f);
    }

    {
        SCOPED_TRACE("PP-OCRDBv3");
        testTextDetectionModelByDB(weightPathPPDB, "", imgPath, gt, binThresh, polyThresh, maxCandidates, unclipRatio, size, meanPPDB, scalePPDB, 0.21f);
    }
}

TEST_P(Test_Model, TextDetectionByEAST)
{
    std::string imgPath = _tf("text_det_test2.jpg");
    std::string weightPath = _tf("frozen_east_text_detection.pb", false);

    // GroundTruth
    std::vector<RotatedRect> gt = {
        RotatedRect(Point2f(657.55f, 409.5f), Size2f(316.84f, 62.45f), -4.79)
    };

    // Model parameters
    Size size{320, 320};
    double scale = 1.0;
    Scalar mean = Scalar(123.68, 116.78, 103.94);
    bool swapRB = true;

    // Detection algorithm parameters
    float confThresh = 0.5;
    float nmsThresh = 0.4;
    double eps_center = 5/*pixels*/;
    double eps_size = 5/*pixels*/;
    double eps_angle = 1;

    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_CUDA_FP16 || target == DNN_TARGET_MYRIAD || target == DNN_TARGET_CPU_FP16)
    {
        eps_center = 10;
        eps_size = 25;
        eps_angle = 3;
    }

    testTextDetectionModelByEAST(weightPath, "", imgPath, gt, confThresh, nmsThresh, size, mean, scale, swapRB, false/*crop*/,
                                 eps_center, eps_size, eps_angle
    );
}
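
// Instantiate every test above for each available backend/target pair reported by
// dnnBackendsAndTargets().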
INSTANTIATE_TEST_CASE_P(/**/, Test_Model, dnnBackendsAndTargets());

}} // namespace