// test_misc.cpp
  1. // This file is part of OpenCV project.
  2. // It is subject to the license terms in the LICENSE file found in the top-level directory
  3. // of this distribution and at http://opencv.org/license.html.
  4. //
  5. // Copyright (C) 2017, Intel Corporation, all rights reserved.
  6. // Third party copyrights are property of their respective owners.
  7. #include "test_precomp.hpp"
  8. #include <opencv2/core/ocl.hpp>
  9. #include <opencv2/core/opencl/ocl_defs.hpp>
  10. #include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
  11. namespace opencv_test { namespace {
  12. TEST(blobFromImage_4ch, Regression)
  13. {
  14. Mat ch[4];
  15. for(int i = 0; i < 4; i++)
  16. ch[i] = Mat::ones(10, 10, CV_8U)*i;
  17. Mat img;
  18. merge(ch, 4, img);
  19. Mat blob = dnn::blobFromImage(img, 1., Size(), Scalar(), false, false);
  20. for(int i = 0; i < 4; i++)
  21. {
  22. ch[i] = Mat(img.rows, img.cols, CV_32F, blob.ptr(0, i));
  23. ASSERT_DOUBLE_EQ(cvtest::norm(ch[i], cv::NORM_INF), i);
  24. }
  25. }
  26. TEST(blobFromImage, allocated)
  27. {
  28. int size[] = {1, 3, 4, 5};
  29. Mat img(size[2], size[3], CV_32FC(size[1]));
  30. Mat blob(4, size, CV_32F);
  31. void* blobData = blob.data;
  32. dnn::blobFromImage(img, blob, 1.0 / 255, Size(), Scalar(), false, false);
  33. ASSERT_EQ(blobData, blob.data);
  34. }
  35. TEST(imagesFromBlob, Regression)
  36. {
  37. int nbOfImages = 8;
  38. std::vector<cv::Mat> inputImgs(nbOfImages);
  39. for (int i = 0; i < nbOfImages; i++)
  40. {
  41. inputImgs[i] = cv::Mat::ones(100, 100, CV_32FC3);
  42. cv::randu(inputImgs[i], cv::Scalar::all(0), cv::Scalar::all(1));
  43. }
  44. cv::Mat blob = cv::dnn::blobFromImages(inputImgs, 1., cv::Size(), cv::Scalar(), false, false);
  45. std::vector<cv::Mat> outputImgs;
  46. cv::dnn::imagesFromBlob(blob, outputImgs);
  47. for (int i = 0; i < nbOfImages; i++)
  48. {
  49. EXPECT_EQ(0, cvtest::norm(inputImgs[i], outputImgs[i], NORM_INF))
  50. << "i=" << i
  51. << " inputImgs[i]=" << inputImgs[i].size
  52. << " outputImgs[i]=" << outputImgs[i].size;
  53. }
  54. }
  55. TEST(blobFromImageWithParams_4ch, NHWC_scalar_scale)
  56. {
  57. Mat img(10, 10, CV_8UC4, cv::Scalar(0,1,2,3));
  58. std::vector<double> factorVec = {0.1, 0.2, 0.3, 0.4};
  59. Scalar scalefactor(factorVec[0], factorVec[1], factorVec[2], factorVec[3]);
  60. Image2BlobParams param;
  61. param.scalefactor = scalefactor;
  62. param.datalayout = DNN_LAYOUT_NHWC;
  63. Mat blob = dnn::blobFromImageWithParams(img, param); // [1, 10, 10, 4]
  64. float* blobPtr = blob.ptr<float>(0);
  65. std::vector<float> targetVec = {(float )factorVec[0] * 0, (float )factorVec[1] * 1, (float )factorVec[2] * 2, (float )factorVec[3] * 3}; // Target Value.
  66. for (int hi = 0; hi < 10; hi++)
  67. {
  68. for (int wi = 0; wi < 10; wi++)
  69. {
  70. float* hwPtr = blobPtr + hi * 10 * 4 + wi * 4;
  71. // Check equal
  72. EXPECT_NEAR(hwPtr[0], targetVec[0], 1e-5);
  73. EXPECT_NEAR(hwPtr[1], targetVec[1], 1e-5);
  74. EXPECT_NEAR(hwPtr[2], targetVec[2], 1e-5);
  75. EXPECT_NEAR(hwPtr[3], targetVec[3], 1e-5);
  76. }
  77. }
  78. }
  79. TEST(blobFromImageWithParams_4ch, letter_box)
  80. {
  81. Mat img(40, 20, CV_8UC4, cv::Scalar(0,1,2,3));
  82. // Construct target mat.
  83. Mat targetCh[4];
  84. // The letterbox will add zero at the left and right of output blob.
  85. // After the letterbox, every row data would have same value showing as valVec.
  86. std::vector<uint8_t> valVec = {0,0,0,0,0, 1,1,1,1,1,1,1,1,1,1, 0,0,0,0,0};
  87. Mat rowM(1, 20, CV_8UC1, valVec.data());
  88. for(int i = 0; i < 4; i++)
  89. {
  90. targetCh[i] = rowM * i;
  91. }
  92. Mat targetImg;
  93. merge(targetCh, 4, targetImg);
  94. Size targeSize(20, 20);
  95. Image2BlobParams param;
  96. param.size = targeSize;
  97. param.paddingmode = DNN_PMODE_LETTERBOX;
  98. Mat blob = dnn::blobFromImageWithParams(img, param);
  99. Mat targetBlob = dnn::blobFromImage(targetImg, 1.0, targeSize); // only convert data from uint8 to float32.
  100. EXPECT_EQ(0, cvtest::norm(targetBlob, blob, NORM_INF));
  101. }
// Smoke-test the format auto-detection of readNet() across the supported
// framework file types; each pair must load into a non-empty Net.
TEST(readNet, Regression)
{
    // Caffe: .prototxt + .caffemodel
    Net net = readNet(findDataFile("dnn/squeezenet_v1.1.prototxt"),
                      findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
    EXPECT_FALSE(net.empty());
    // Caffe with the arguments swapped (model first) — readNet must reorder.
    net = readNet(findDataFile("dnn/opencv_face_detector.caffemodel", false),
                  findDataFile("dnn/opencv_face_detector.prototxt"));
    EXPECT_FALSE(net.empty());
    // Torch: single .t7 file.
    net = readNet(findDataFile("dnn/openface_nn4.small2.v1.t7", false));
    EXPECT_FALSE(net.empty());
    // Darknet: .cfg + .weights
    net = readNet(findDataFile("dnn/tiny-yolo-voc.cfg"),
                  findDataFile("dnn/tiny-yolo-voc.weights", false));
    EXPECT_FALSE(net.empty());
    // TensorFlow: .pbtxt + .pb
    net = readNet(findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt"),
                  findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false));
    EXPECT_FALSE(net.empty());
}
TEST(readNet, do_not_call_setInput) // https://github.com/opencv/opencv/issues/16618
{
    // 1. load network
    const string proto = findDataFile("dnn/squeezenet_v1.1.prototxt");
    const string model = findDataFile("dnn/squeezenet_v1.1.caffemodel", false);
    Net net = readNetFromCaffe(proto, model);

    // 2. mistake: no inputs are specified through .setInput()

    // 3. try inference
    Mat res;
    EXPECT_THROW(
    {
        res = net.forward(); // no inputs after loading => should fail
    }, cv::Exception);
    // forward() must not have produced (or leaked) any output data.
    EXPECT_TRUE(res.empty()) << res.size;
}
  134. TEST(Net, empty_forward_18392)
  135. {
  136. cv::dnn::Net net;
  137. Mat image(Size(512, 512), CV_8UC3, Scalar::all(0));
  138. Mat inputBlob = cv::dnn::blobFromImage(image, 1.0, Size(512, 512), Scalar(0,0,0), true, false);
  139. net.setInput(inputBlob);
  140. EXPECT_ANY_THROW(Mat output = net.forward());
  141. }
  142. #ifdef HAVE_INF_ENGINE
// Shared body for the "forward() without setInput()" Inference Engine tests:
// an OpenVINO IR model run without inputs must raise cv::Exception.
static
void test_readNet_IE_do_not_call_setInput(Backend backendId)
{
    const Target targetId = DNN_TARGET_CPU;

    const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
    const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

    // Only the nGraph IE backend is supported by this helper nowadays.
    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
    Net net = readNet(model, proto);
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);

    // 2. mistake: no inputs are specified through .setInput()

    // 3. try inference
    Mat res;
    EXPECT_THROW(
    {
        res = net.forward(); // no inputs after loading => should fail
    }, cv::Exception);
    EXPECT_TRUE(res.empty()) << res.size;
}
  162. #ifdef HAVE_DNN_IE_NN_BUILDER_2019
// Legacy NN Builder backend variant; the shared helper currently asserts the
// nGraph backend, so this test fails fast if the legacy path is exercised.
TEST(readNet, do_not_call_setInput_IE_NN_BUILDER_2019)
{
    test_readNet_IE_do_not_call_setInput(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
}
  167. #endif
  168. #ifdef HAVE_DNN_NGRAPH
// nGraph backend variant of the "forward() without setInput()" check.
TEST(readNet, do_not_call_setInput_IE_NGRAPH)
{
    test_readNet_IE_do_not_call_setInput(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
}
  173. #endif
  174. #endif // HAVE_INF_ENGINE
  175. typedef testing::TestWithParam<tuple<Backend, Target> > dump;
  176. TEST_P(dump, Regression)
  177. {
  178. const int backend = get<0>(GetParam());
  179. const int target = get<1>(GetParam());
  180. Net net = readNet(findDataFile("dnn/squeezenet_v1.1.prototxt"),
  181. findDataFile("dnn/squeezenet_v1.1.caffemodel", false));
  182. ASSERT_EQ(net.getLayerInputs(net.getLayerId("fire2/concat")).size(), 2);
  183. int size[] = {1, 3, 227, 227};
  184. Mat input = cv::Mat::ones(4, size, CV_32F);
  185. net.setInput(input);
  186. net.setPreferableBackend(backend);
  187. net.setPreferableTarget(target);
  188. EXPECT_FALSE(net.dump().empty());
  189. net.forward();
  190. EXPECT_FALSE(net.dump().empty());
  191. }
  192. INSTANTIATE_TEST_CASE_P(/**/, dump, dnnBackendsAndTargets());
  193. class FirstCustomLayer CV_FINAL : public Layer
  194. {
  195. public:
  196. FirstCustomLayer(const LayerParams &params) : Layer(params) {}
  197. static Ptr<Layer> create(LayerParams& params)
  198. {
  199. return Ptr<Layer>(new FirstCustomLayer(params));
  200. }
  201. void forward(InputArrayOfArrays, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
  202. {
  203. CV_TRACE_FUNCTION();
  204. CV_TRACE_ARG_VALUE(name, "name", name.c_str());
  205. std::vector<Mat> outputs;
  206. outputs_arr.getMatVector(outputs);
  207. outputs[0].setTo(1);
  208. }
  209. };
  210. class SecondCustomLayer CV_FINAL : public Layer
  211. {
  212. public:
  213. SecondCustomLayer(const LayerParams &params) : Layer(params) {}
  214. static Ptr<Layer> create(LayerParams& params)
  215. {
  216. return Ptr<Layer>(new SecondCustomLayer(params));
  217. }
  218. void forward(InputArrayOfArrays, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays) CV_OVERRIDE
  219. {
  220. CV_TRACE_FUNCTION();
  221. CV_TRACE_ARG_VALUE(name, "name", name.c_str());
  222. std::vector<Mat> outputs;
  223. outputs_arr.getMatVector(outputs);
  224. outputs[0].setTo(2);
  225. }
  226. };
// Registering the same type twice stacks implementations; unregistering pops
// the most recent one, revealing the earlier registration again.
TEST(LayerFactory, custom_layers)
{
    LayerParams lp;
    lp.name = "name";
    lp.type = "CustomType";

    Mat inp(1, 1, CV_32FC1);
    for (int i = 0; i < 3; ++i)
    {
        // Pass 0: register FirstCustomLayer (output = 1).
        if (i == 0)      { CV_DNN_REGISTER_LAYER_CLASS(CustomType, FirstCustomLayer); }
        // Pass 1: register SecondCustomLayer on top (output = 2).
        else if (i == 1) { CV_DNN_REGISTER_LAYER_CLASS(CustomType, SecondCustomLayer); }
        // Pass 2: unregister once -> falls back to FirstCustomLayer (output = 1).
        else if (i == 2) { LayerFactory::unregisterLayer("CustomType"); }

        Net net;
        net.addLayerToPrev(lp.name, lp.type, lp);
        net.setInput(inp);
        net.setPreferableBackend(DNN_BACKEND_OPENCV);
        Mat output = net.forward();

        if (i == 0)      { EXPECT_EQ(output.at<float>(0), 1); }
        else if (i == 1) { EXPECT_EQ(output.at<float>(0), 2); }
        else if (i == 2) { EXPECT_EQ(output.at<float>(0), 1); }
    }
    // Remove the remaining FirstCustomLayer registration.
    LayerFactory::unregisterLayer("CustomType");
}
typedef testing::TestWithParam<tuple<float, Vec3f, int, tuple<Backend, Target> > > setInput;
// Scale/mean passed to Net::setInput() must produce the same result as
// applying them up-front in blobFromImage().
TEST_P(setInput, normalization)
{
    const float kScale = get<0>(GetParam());
    const Scalar kMean = get<1>(GetParam());
    const int dtype = get<2>(GetParam());
    const int backend = get<0>(get<3>(GetParam()));
    const int target = get<1>(get<3>(GetParam()));
    const bool kSwapRB = true;

    // Backends without in-graph normalization support for these configs.
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16 && dtype != CV_32F)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (backend == DNN_BACKEND_VKCOM && dtype != CV_32F)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN);

    Mat inp(5, 5, CV_8UC3);
    randu(inp, 0, 255);
    // Reference: normalization applied during blob creation.
    Mat ref = blobFromImage(inp, kScale, Size(), kMean, kSwapRB, /*crop*/false);

    LayerParams lp;
    Net net;
    net.addLayerToPrev("testLayer", "Identity", lp);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    // Under test: un-normalized blob, normalization delegated to setInput().
    Mat blob = blobFromImage(inp, 1.0, Size(), Scalar(), kSwapRB, /*crop*/false, dtype);
    ASSERT_EQ(blob.type(), dtype);
    net.setInput(blob, "", kScale, kMean);
    Mat out = net.forward();
    ASSERT_EQ(out.type(), CV_32F);
    normAssert(ref, out, "", 4e-4, 1e-3);
}
INSTANTIATE_TEST_CASE_P(/**/, setInput, Combine(
    Values(1.0f, 1.0 / 127.5),
    Values(Vec3f(), Vec3f(50, 50, 50), Vec3f(10, 50, 140)),
    Values(CV_32F, CV_8U),
    dnnBackendsAndTargets()
));
// Custom layer that implements ONLY the deprecated raw-pointer forward()
// overload; the engine must route inference through it via the fallback.
class CustomLayerWithDeprecatedForward CV_FINAL : public Layer
{
public:
    CustomLayerWithDeprecatedForward(const LayerParams &params) : Layer(params) {}

    // Factory callback for CV_DNN_REGISTER_LAYER_CLASS.
    static Ptr<Layer> create(LayerParams& params)
    {
        return Ptr<Layer>(new CustomLayerWithDeprecatedForward(params));
    }

    // Deprecated forward() signature: adds 0.5 to the input.
    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
    {
        CV_Assert_N(inputs[0]->depth() == CV_32F, outputs[0].depth() == CV_32F);
        cv::add(*inputs[0], 0.5f, outputs[0]);
    }
};
// Custom layer exposing both forward() overloads plus an OpenCL kernel:
// the InputArray overload dispatches to OpenCL when available and otherwise
// falls back (via Layer::forward_fallback) to the deprecated overload below.
class CustomLayerWithDeprecatedForwardAndFallback CV_FINAL : public Layer
{
public:
    CustomLayerWithDeprecatedForwardAndFallback(const LayerParams &params) : Layer(params) {}

    // Factory callback for CV_DNN_REGISTER_LAYER_CLASS.
    static Ptr<Layer> create(LayerParams& params)
    {
        return Ptr<Layer>(new CustomLayerWithDeprecatedForwardAndFallback(params));
    }

    void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        // Try the OpenCL path first on OpenCL targets; on success this macro
        // returns from forward() without touching the fallback below.
        CV_OCL_RUN(preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16,
                   forward_ocl(inputs, outputs, internals));

        Layer::forward_fallback(inputs, outputs, internals);
    }

    // Deprecated forward() signature: adds 0.5 to the input (CPU path).
    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
    {
        CV_Assert_N(inputs[0]->depth() == CV_32F, outputs[0].depth() == CV_32F);
        cv::add(*inputs[0], 0.5f, outputs[0]);
    }

#ifdef HAVE_OPENCL
    // OpenCL path: same +0.5 computation on UMats; returns false (so the
    // caller falls back) for non-CV_32F inputs.
    bool forward_ocl(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
    {
        if (inputs_arr.depth() != CV_32F)
            return false;

        std::vector<UMat> inputs;
        std::vector<UMat> outputs;
        inputs_arr.getUMatVector(inputs);
        outputs_arr.getUMatVector(outputs);
        cv::add(inputs[0], 0.5f, outputs[0]);
        return true;
    }
#endif
};
  334. typedef testing::TestWithParam<tuple<Backend, Target> > DeprecatedForward;
  335. TEST_P(DeprecatedForward, CustomLayer)
  336. {
  337. const int backend = get<0>(GetParam());
  338. const int target = get<1>(GetParam());
  339. Mat inp(5, 5, CV_32FC1);
  340. randu(inp, -1.0f, 1.0f);
  341. inp = blobFromImage(inp);
  342. CV_DNN_REGISTER_LAYER_CLASS(CustomType, CustomLayerWithDeprecatedForward);
  343. try
  344. {
  345. LayerParams lp;
  346. Net net;
  347. net.addLayerToPrev("testLayer", "CustomType", lp);
  348. net.setPreferableBackend(backend);
  349. net.setPreferableTarget(target);
  350. net.setInput(inp);
  351. Mat out = net.forward();
  352. normAssert(out, inp + 0.5f, "", 2e-4, 7e-4);
  353. }
  354. catch (...)
  355. {
  356. LayerFactory::unregisterLayer("CustomType");
  357. throw;
  358. }
  359. LayerFactory::unregisterLayer("CustomType");
  360. }
  361. TEST_P(DeprecatedForward, CustomLayerWithFallback)
  362. {
  363. const int backend = get<0>(GetParam());
  364. const int target = get<1>(GetParam());
  365. Mat inp(5, 5, CV_32FC1);
  366. randu(inp, -1.0f, 1.0f);
  367. inp = blobFromImage(inp);
  368. CV_DNN_REGISTER_LAYER_CLASS(CustomType, CustomLayerWithDeprecatedForwardAndFallback);
  369. try
  370. {
  371. LayerParams lp;
  372. Net net;
  373. net.addLayerToPrev("testLayer", "CustomType", lp);
  374. net.setPreferableBackend(backend);
  375. net.setPreferableTarget(target);
  376. net.setInput(inp);
  377. Mat out = net.forward();
  378. normAssert(out, inp + 0.5f, "", 2e-4, 7e-4);
  379. }
  380. catch (...)
  381. {
  382. LayerFactory::unregisterLayer("CustomType");
  383. throw;
  384. }
  385. LayerFactory::unregisterLayer("CustomType");
  386. }
  387. INSTANTIATE_TEST_CASE_P(/**/, DeprecatedForward, dnnBackendsAndTargets());
  388. TEST(Net, forwardAndRetrieve)
  389. {
  390. std::string prototxt =
  391. "input: \"data\"\n"
  392. "layer {\n"
  393. " name: \"testLayer\"\n"
  394. " type: \"Slice\"\n"
  395. " bottom: \"data\"\n"
  396. " top: \"firstCopy\"\n"
  397. " top: \"secondCopy\"\n"
  398. " slice_param {\n"
  399. " axis: 0\n"
  400. " slice_point: 2\n"
  401. " }\n"
  402. "}";
  403. Net net = readNetFromCaffe(&prototxt[0], prototxt.size());
  404. net.setPreferableBackend(DNN_BACKEND_OPENCV);
  405. Mat inp(4, 5, CV_32F);
  406. randu(inp, -1, 1);
  407. net.setInput(inp);
  408. std::vector<String> outNames;
  409. outNames.push_back("testLayer");
  410. std::vector<std::vector<Mat> > outBlobs;
  411. net.forward(outBlobs, outNames);
  412. EXPECT_EQ(outBlobs.size(), 1);
  413. EXPECT_EQ(outBlobs[0].size(), 2);
  414. normAssert(outBlobs[0][0], inp.rowRange(0, 2), "first part");
  415. normAssert(outBlobs[0][1], inp.rowRange(2, 4), "second part");
  416. }
  417. #ifdef HAVE_INF_ENGINE
// Upper bound on how long a single forwardAsync() result may take.
static const std::chrono::milliseconds async_timeout(10000);

// This test runs network in synchronous mode for different inputs and then
// runs the same model asynchronously for the same inputs.
typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Async;
TEST_P(Async, model_optimizer_pipeline_set_and_forward_single)
{
    const int dtype = get<0>(GetParam());
    const Backend backendId = get<0>(get<1>(GetParam()));
    const Target targetId = get<1>(get<1>(GetParam()));

    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    // forwardAsync() is only implemented for the Inference Engine backends.
    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        throw SkipTestException("No support for async forward");

    const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
    const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

    // Only nGraph is still exercised here (NN Builder path retired).
    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
    Net netSync = readNet(model, proto);
    netSync.setPreferableBackend(backendId);
    netSync.setPreferableTarget(targetId);

    Net netAsync = readNet(model, proto);
    netAsync.setPreferableBackend(backendId);
    netAsync.setPreferableTarget(targetId);

    // Generate inputs.
    const int numInputs = 10;
    std::vector<Mat> inputs(numInputs);
    int blobSize[] = {2, 6, 75, 113};
    for (int i = 0; i < numInputs; ++i)
    {
        inputs[i].create(4, &blobSize[0], dtype);
        randu(inputs[i], 0, 255);
    }

    // Run synchronously.
    std::vector<Mat> refs(numInputs);
    for (int i = 0; i < numInputs; ++i)
    {
        netSync.setInput(inputs[i]);
        // clone() — forward() returns a buffer reused on the next call.
        refs[i] = netSync.forward().clone();
    }

    // Run asynchronously. To make test more robust, process inputs in the reversed order.
    for (int i = numInputs - 1; i >= 0; --i)
    {
        netAsync.setInput(inputs[i]);

        AsyncArray out = netAsync.forwardAsync();
        ASSERT_TRUE(out.valid());
        Mat result;
        EXPECT_TRUE(out.get(result, async_timeout));
        normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
    }
}
// Same as ..._single, but all async requests are issued first and their
// results are collected afterwards (tests pipelined/outstanding requests).
TEST_P(Async, model_optimizer_pipeline_set_and_forward_all)
{
    const int dtype = get<0>(GetParam());
    const Backend backendId = get<0>(get<1>(GetParam()));
    const Target targetId = get<1>(get<1>(GetParam()));

    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    // forwardAsync() is only implemented for the Inference Engine backends.
    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        throw SkipTestException("No support for async forward");

    const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
    const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

    // Only nGraph is still exercised here (NN Builder path retired).
    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
    Net netSync = readNet(model, proto);
    netSync.setPreferableBackend(backendId);
    netSync.setPreferableTarget(targetId);

    Net netAsync = readNet(model, proto);
    netAsync.setPreferableBackend(backendId);
    netAsync.setPreferableTarget(targetId);

    // Generate inputs.
    const int numInputs = 10;
    std::vector<Mat> inputs(numInputs);
    int blobSize[] = {2, 6, 75, 113};
    for (int i = 0; i < numInputs; ++i)
    {
        inputs[i].create(4, &blobSize[0], dtype);
        randu(inputs[i], 0, 255);
    }

    // Run synchronously.
    std::vector<Mat> refs(numInputs);
    for (int i = 0; i < numInputs; ++i)
    {
        netSync.setInput(inputs[i]);
        // clone() — forward() returns a buffer reused on the next call.
        refs[i] = netSync.forward().clone();
    }

    // Run asynchronously. To make test more robust, process inputs in the reversed order.
    std::vector<AsyncArray> outs(numInputs);
    for (int i = numInputs - 1; i >= 0; --i)
    {
        netAsync.setInput(inputs[i]);
        outs[i] = netAsync.forwardAsync();
    }

    // Collect and verify all results after all requests were submitted.
    for (int i = numInputs - 1; i >= 0; --i)
    {
        ASSERT_TRUE(outs[i].valid());
        Mat result;
        EXPECT_TRUE(outs[i].get(result, async_timeout));
        normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
    }
}
  516. TEST_P(Async, create_layer_pipeline_set_and_forward_all)
  517. {
  518. const int dtype = get<0>(GetParam());
  519. const Backend backendId = get<0>(get<1>(GetParam()));
  520. const Target targetId = get<1>(get<1>(GetParam()));
  521. if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
  522. throw SkipTestException("No support for async forward");
  523. // Exception: Default implementation fallbacks in asynchronous mode
  524. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && dtype == CV_8U)
  525. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
  526. ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
  527. Net netSync;
  528. Net netAsync;
  529. {
  530. int inChannels = 4;
  531. int outChannels = 12;
  532. int group = 3;
  533. Size inSize(113, 75);
  534. Size kernel(4, 5);
  535. Size stride(2, 3);
  536. Size pad(0, 1);
  537. Size dilation(1, 1);
  538. bool hasBias = true;
  539. int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width};
  540. Mat weights(4, &sz[0], CV_32F);
  541. randu(weights, -1.0f, 1.0f);
  542. LayerParams lp;
  543. lp.set("kernel_w", kernel.width);
  544. lp.set("kernel_h", kernel.height);
  545. lp.set("pad_w", pad.width);
  546. lp.set("pad_h", pad.height);
  547. lp.set("stride_w", stride.width);
  548. lp.set("stride_h", stride.height);
  549. lp.set("dilation_w", dilation.width);
  550. lp.set("dilation_h", dilation.height);
  551. lp.set("num_output", outChannels);
  552. lp.set("group", group);
  553. lp.set("bias_term", hasBias);
  554. lp.type = "Convolution";
  555. lp.name = "testLayer";
  556. lp.blobs.push_back(weights);
  557. if (hasBias)
  558. {
  559. Mat bias(1, outChannels, CV_32F);
  560. randu(bias, -1.0f, 1.0f);
  561. lp.blobs.push_back(bias);
  562. }
  563. int inpSz[] = {1, inChannels, inSize.height, inSize.width};
  564. Mat input(4, &inpSz[0], CV_32F);
  565. netSync.addLayerToPrev(lp.name, lp.type, lp);
  566. netAsync.addLayerToPrev(lp.name, lp.type, lp);
  567. }
  568. netSync.setPreferableBackend(backendId);
  569. netSync.setPreferableTarget(targetId);
  570. netAsync.setPreferableBackend(backendId);
  571. netAsync.setPreferableTarget(targetId);
  572. // Generate inputs.
  573. const int numInputs = 10;
  574. std::vector<Mat> inputs(numInputs);
  575. int blobSize[] = {1, 4, 75, 113};
  576. for (int i = 0; i < numInputs; ++i)
  577. {
  578. inputs[i].create(4, &blobSize[0], dtype);
  579. randu(inputs[i], 0, 255);
  580. }
  581. // Run synchronously.
  582. std::vector<Mat> refs(numInputs);
  583. for (int i = 0; i < numInputs; ++i)
  584. {
  585. netSync.setInput(inputs[i]);
  586. refs[i] = netSync.forward().clone();
  587. }
  588. // Run asynchronously. To make test more robust, process inputs in the reversed order.
  589. std::vector<AsyncArray> outs(numInputs);
  590. for (int i = numInputs - 1; i >= 0; --i)
  591. {
  592. netAsync.setInput(inputs[i]);
  593. outs[i] = netAsync.forwardAsync();
  594. }
  595. for (int i = numInputs - 1; i >= 0; --i)
  596. {
  597. ASSERT_TRUE(outs[i].valid());
  598. Mat result;
  599. EXPECT_TRUE(outs[i].get(result, async_timeout));
  600. normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
  601. }
  602. }
  603. INSTANTIATE_TEST_CASE_P(/**/, Async, Combine(
  604. Values(CV_32F, CV_8U),
  605. dnnBackendsAndTargetsIE()
  606. ));
  607. typedef testing::TestWithParam<tuple<Backend, Target> > Test_Model_Optimizer;
  608. TEST_P(Test_Model_Optimizer, forward_two_nets)
  609. {
  610. const Backend backendId = get<0>(GetParam());
  611. const Target targetId = get<1>(GetParam());
  612. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD)
  613. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
  614. const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
  615. const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
  616. ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
  617. Net net0 = readNet(model, proto);
  618. net0.setPreferableTarget(targetId);
  619. Net net1 = readNet(model, proto);
  620. net1.setPreferableTarget(targetId);
  621. // Generate inputs.
  622. int blobSize[] = {2, 6, 75, 113};
  623. Mat input(4, &blobSize[0], CV_32F);
  624. randu(input, 0, 255);
  625. net0.setInput(input);
  626. Mat ref0 = net0.forward().clone();
  627. net1.setInput(input);
  628. Mat ref1 = net1.forward();
  629. net0.setInput(input);
  630. Mat ref2 = net0.forward();
  631. normAssert(ref0, ref2, 0, 0);
  632. }
// Loading an IR model from in-memory buffers must yield results identical to
// loading the same model from files.
TEST_P(Test_Model_Optimizer, readFromBuffer)
{
    const Backend backendId = get<0>(GetParam());
    const Target targetId = get<1>(GetParam());

    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        throw SkipTestException("No support for async forward");

    const std::string& weightsFile = findDataFile("dnn/layers/layer_convolution.bin");
    const std::string& modelFile = findDataFile("dnn/layers/layer_convolution.xml");

    // Only nGraph is still exercised here (NN Builder path retired).
    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
    // Reference: load from files.
    Net net1 = readNetFromModelOptimizer(modelFile, weightsFile);
    net1.setPreferableBackend(backendId);
    net1.setPreferableTarget(targetId);

    // Under test: load the same model from raw memory buffers.
    std::vector<char> modelConfig;
    readFileContent(modelFile, modelConfig);
    std::vector<char> weights;
    readFileContent(weightsFile, weights);

    Net net2 = readNetFromModelOptimizer(
            (const uchar*)modelConfig.data(), modelConfig.size(),
            (const uchar*)weights.data(), weights.size()
    );
    net2.setPreferableBackend(backendId);
    net2.setPreferableTarget(targetId);

    int blobSize[] = {2, 6, 75, 113};
    Mat input(4, &blobSize[0], CV_32F);
    randu(input, 0, 255);

    Mat ref, actual;
    {
        net1.setInput(input);
        ref = net1.forward();
    }
    {
        net2.setInput(input);
        actual = net2.forward();
    }
    // Results must match bit-exactly.
    normAssert(ref, actual, "", 0, 0);
}
  671. TEST_P(Test_Model_Optimizer, flexible_inputs)
  672. {
  673. const Backend backendId = get<0>(GetParam());
  674. const Target targetId = get<1>(GetParam());
  675. if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD)
  676. applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
  677. const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
  678. const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
  679. ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
  680. Net net0 = readNet(model, proto);
  681. net0.setPreferableTarget(targetId);
  682. Net net1 = readNet(model, proto);
  683. net1.setPreferableTarget(targetId);
  684. // Generate inputs.
  685. int blobSize0[] = {2, 6, 75, 113};
  686. Mat input0(4, &blobSize0[0], CV_32F);
  687. randu(input0, 0, 255);
  688. net0.setInput(input0);
  689. Mat ref = net0.forward().clone();
  690. int blobSize1[] = {1, 6, 10, 9};
  691. Mat input1(4, &blobSize1[0], CV_32F);
  692. randu(input1, 0, 255);
  693. net1.setInput(input1);
  694. Mat out = net1.forward();
  695. EXPECT_NE(out.size, ref.size);
  696. net1.setInput(input0);
  697. out = net1.forward();
  698. normAssert(ref, out, 0, 0);
  699. }
  700. INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
  701. dnnBackendsAndTargetsIE()
  702. );
  703. #endif // HAVE_INF_ENGINE
typedef testing::TestWithParam<tuple<MatDepth, MatDepth, tuple<Backend, Target> > > Test_two_inputs;
// A two-input Eltwise sum with per-input scales set via setInput() must match
// cv::addWeighted, across mixed CV_32F/CV_8U input type combinations.
TEST_P(Test_two_inputs, basic)
{
    static const float kScale = 0.5f;
    static const float kScaleInv = 1.0f / kScale;

    Backend backendId = get<0>(get<2>(GetParam()));
    Target targetId = get<1>(get<2>(GetParam()));

    int type1 = get<0>(GetParam());
    int type2 = get<1>(GetParam());

    if (backendId == DNN_BACKEND_VKCOM && !(type1 == CV_32F && type2 == CV_32F))
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN);

    // Build: one Eltwise-sum layer wired to two network inputs.
    Net net;
    LayerParams lp;
    lp.type = "Eltwise";
    lp.name = "testLayer";
    lp.set("operation", "sum");
    int eltwiseId = net.addLayerToPrev(lp.name, lp.type, lp);  // connect to a first input
    net.connect(0, 1, eltwiseId, 1);  // connect to a second input

    int inpSize[] = {1, 2, 3, 4};
    Mat firstInp(4, &inpSize[0], type1);
    Mat secondInp(4, &inpSize[0], type2);
    randu(firstInp, 0, 100);
    randu(secondInp, 0, 100);

#ifndef CV_CXX11
    std::vector<String> input_names;
    input_names.push_back("data");
    input_names.push_back("second_input");
    net.setInputsNames(input_names);
#else
    net.setInputsNames({"data", "second_input"});
#endif
    // Per-input scale factors are applied by setInput(), not by the layer.
    net.setInput(firstInp, "data", kScale);
    net.setInput(secondInp, "second_input", kScaleInv);
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);

    Mat out = net.forward();

    // Reference result with the same scales.
    Mat ref;
    addWeighted(firstInp, kScale, secondInp, kScaleInv, 0, ref, CV_32F);

    // Relaxed tolerances for reduced-precision targets.
    double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD || targetId == DNN_TARGET_CUDA_FP16) ? 0.06 : 1e-6;
    double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD || targetId == DNN_TARGET_CUDA_FP16) ? 0.3 : 1e-5;
    normAssert(out, ref, "", l1, lInf);

    // Diagnostic dump for debugging runs and on failure.
    if (cvtest::debugLevel > 0 || HasFailure())
    {
        std::cout << "input1 scale=" << kScale << " input2 scale=" << kScaleInv << std::endl;
        std::cout << "input1: " << firstInp.size << " " << firstInp.reshape(1, 1) << std::endl;
        std::cout << "input2: " << secondInp.size << " " << secondInp.reshape(1, 1) << std::endl;
        std::cout << "ref: " << ref.reshape(1, 1) << std::endl;
        std::cout << "out: " << out.reshape(1, 1) << std::endl;
    }
}
  754. INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_two_inputs, Combine(
  755. Values(CV_32F, CV_8U),
  756. Values(CV_32F, CV_8U),
  757. dnnBackendsAndTargets()
  758. ));
  759. }} // namespace