diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index de8447d2be..bbe712c133 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -152,6 +152,7 @@ InfEngineBackendNet::InfEngineBackendNet()
 {
     targetDevice = InferenceEngine::TargetDevice::eCPU;
     precision = InferenceEngine::Precision::FP32;
+    hasNetOwner = false;
 }
 
 InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net)
@@ -162,6 +163,7 @@ InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net)
     outputs = net.getOutputsInfo();
     layers.resize(net.layerCount());  // A hack to execute InfEngineBackendNet::layerCount correctly.
     netOwner = net;
+    hasNetOwner = true;
 }
 
 void InfEngineBackendNet::Release() noexcept
@@ -178,12 +180,12 @@ void InfEngineBackendNet::setPrecision(InferenceEngine::Precision p) noexcept
 
 InferenceEngine::Precision InfEngineBackendNet::getPrecision() noexcept
 {
-    return precision;
+    return hasNetOwner ? netOwner.getPrecision() : precision;
 }
 
 InferenceEngine::Precision InfEngineBackendNet::getPrecision() const noexcept
 {
-    return precision;
+    return hasNetOwner ? netOwner.getPrecision() : precision;
 }
 
 // Assume that outputs of network is unconnected blobs.
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index ffaeb288b2..e5542b3296 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -134,6 +134,9 @@ private:
     InferenceEngine::InferRequest infRequest;
     // In case of models from Model Optimizer we need to manage their lifetime.
     InferenceEngine::CNNNetwork netOwner;
+    // There is no way to check if netOwner is initialized or not, so we use
+    // a separate flag to determine if the model has been loaded from IR.
+    bool hasNetOwner;
 
     std::string name;
 
diff --git a/modules/dnn/test/test_caffe_importer.cpp b/modules/dnn/test/test_caffe_importer.cpp
index 2ed07b301f..a914101d51 100644
--- a/modules/dnn/test/test_caffe_importer.cpp
+++ b/modules/dnn/test/test_caffe_importer.cpp
@@ -471,6 +471,7 @@ TEST(Test_Caffe, shared_weights)
 
     net.setInput(blob_1, "input_1");
     net.setInput(blob_2, "input_2");
+    net.setPreferableBackend(DNN_BACKEND_OPENCV);
 
     Mat sum = net.forward();
 
diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp
index d7c14f2714..5d41b4b916 100644
--- a/modules/dnn/test/test_darknet_importer.cpp
+++ b/modules/dnn/test/test_darknet_importer.cpp
@@ -306,7 +306,7 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
     // batch size 1
     testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff);
 
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD)
 #endif
     // batch size 2
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index ea5eafd71b..468953fe7e 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -166,7 +166,7 @@ TEST_P(Deconvolution, Accuracy)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU &&
         dilation.width == 2 && dilation.height == 2)
         throw SkipTestException("");
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU &&
         hasBias && group != 1)
         throw SkipTestException("Test is disabled for OpenVINO 2018R4");
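The `== 2018040000` to `>= 2018040000` edits throughout this patch share one rationale: INF_ENGINE_RELEASE encodes the OpenVINO release as a plain integer (2018040000 corresponds to 2018 R4), so an exact comparison would silently re-enable each skipped test on the very next release, while `>=` keeps the skip in force for 2018 R4 and everything newer. A minimal sketch of the shared guard pattern; the condition and message are illustrative, not a literal hunk from this patch:

    // Sketch of the version-guard pattern used by the tests in this patch.
    // 2018040000 is assumed to encode OpenVINO 2018 R4.
    #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
        if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
            throw SkipTestException("Test is disabled for OpenVINO 2018R4 and newer");
    #endif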
SkipTestException("Test is disabled for OpenVINO 2018R4"); diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp index 1d41daa025..62e625f03c 100644 --- a/modules/dnn/test/test_layers.cpp +++ b/modules/dnn/test/test_layers.cpp @@ -137,7 +137,7 @@ TEST_P(Test_Caffe_layers, Convolution) TEST_P(Test_Caffe_layers, DeConvolution) { -#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000 +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) throw SkipTestException("Test is disabled for OpenVINO 2018R4"); #endif @@ -918,8 +918,11 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_DWconv_Prelu, Combine(Values(3, 6), Val // Using Intel's Model Optimizer generate .xml and .bin files: // ./ModelOptimizer -w /path/to/caffemodel -d /path/to/prototxt \ // -p FP32 -i -b ${batch_size} -o /path/to/output/folder -TEST(Layer_Test_Convolution_DLDT, Accuracy) +typedef testing::TestWithParam Layer_Test_Convolution_DLDT; +TEST_P(Layer_Test_Convolution_DLDT, Accuracy) { + Target targetId = GetParam(); + Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt")); Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin")); @@ -930,17 +933,29 @@ TEST(Layer_Test_Convolution_DLDT, Accuracy) Mat outDefault = netDefault.forward(); net.setInput(inp); - Mat out = net.forward(); + net.setPreferableTarget(targetId); - normAssert(outDefault, out); + if (targetId != DNN_TARGET_MYRIAD) + { + Mat out = net.forward(); - std::vector outLayers = net.getUnconnectedOutLayers(); - ASSERT_EQ(net.getLayer(outLayers[0])->name, "output_merge"); - ASSERT_EQ(net.getLayer(outLayers[0])->type, "Concat"); + normAssert(outDefault, out); + + std::vector outLayers = net.getUnconnectedOutLayers(); + ASSERT_EQ(net.getLayer(outLayers[0])->name, "output_merge"); + ASSERT_EQ(net.getLayer(outLayers[0])->type, "Concat"); + } + else + { + // An assertion is expected because the model is in FP32 format but + // Myriad plugin supports only FP16 models. + ASSERT_ANY_THROW(net.forward()); + } } -TEST(Layer_Test_Convolution_DLDT, setInput_uint8) +TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8) { + Target targetId = GetParam(); Mat inp = blobFromNPY(_tf("blob.npy")); Mat inputs[] = {Mat(inp.dims, inp.size, CV_8U), Mat()}; @@ -951,12 +966,25 @@ TEST(Layer_Test_Convolution_DLDT, setInput_uint8) for (int i = 0; i < 2; ++i) { Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin")); + net.setPreferableTarget(targetId); net.setInput(inputs[i]); - outs[i] = net.forward(); - ASSERT_EQ(outs[i].type(), CV_32F); + if (targetId != DNN_TARGET_MYRIAD) + { + outs[i] = net.forward(); + ASSERT_EQ(outs[i].type(), CV_32F); + } + else + { + // An assertion is expected because the model is in FP32 format but + // Myriad plugin supports only FP16 models. + ASSERT_ANY_THROW(net.forward()); + } } - normAssert(outs[0], outs[1]); + if (targetId != DNN_TARGET_MYRIAD) + normAssert(outs[0], outs[1]); } +INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Convolution_DLDT, + testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))); // 1. Create a .prototxt file with the following network: // layer { @@ -980,14 +1008,17 @@ TEST(Layer_Test_Convolution_DLDT, setInput_uint8) // net.save('/path/to/caffemodel') // // 3. Convert using ModelOptimizer. 
-typedef testing::TestWithParam > Test_DLDT_two_inputs; +typedef testing::TestWithParam > Test_DLDT_two_inputs; TEST_P(Test_DLDT_two_inputs, as_IR) { int firstInpType = get<0>(GetParam()); int secondInpType = get<1>(GetParam()); - // TODO: It looks like a bug in Inference Engine. + Target targetId = get<2>(GetParam()); + +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018040000 if (secondInpType == CV_8U) - throw SkipTestException(""); + throw SkipTestException("Test is enabled starts from OpenVINO 2018R4"); +#endif Net net = readNet(_tf("net_two_inputs.xml"), _tf("net_two_inputs.bin")); int inpSize[] = {1, 2, 3}; @@ -998,11 +1029,21 @@ TEST_P(Test_DLDT_two_inputs, as_IR) net.setInput(firstInp, "data"); net.setInput(secondInp, "second_input"); - Mat out = net.forward(); + net.setPreferableTarget(targetId); + if (targetId != DNN_TARGET_MYRIAD) + { + Mat out = net.forward(); - Mat ref; - cv::add(firstInp, secondInp, ref, Mat(), CV_32F); - normAssert(out, ref); + Mat ref; + cv::add(firstInp, secondInp, ref, Mat(), CV_32F); + normAssert(out, ref); + } + else + { + // An assertion is expected because the model is in FP32 format but + // Myriad plugin supports only FP16 models. + ASSERT_ANY_THROW(net.forward()); + } } TEST_P(Test_DLDT_two_inputs, as_backend) @@ -1010,6 +1051,8 @@ TEST_P(Test_DLDT_two_inputs, as_backend) static const float kScale = 0.5f; static const float kScaleInv = 1.0f / kScale; + Target targetId = get<2>(GetParam()); + Net net; LayerParams lp; lp.type = "Eltwise"; @@ -1018,9 +1061,9 @@ TEST_P(Test_DLDT_two_inputs, as_backend) int eltwiseId = net.addLayerToPrev(lp.name, lp.type, lp); // connect to a first input net.connect(0, 1, eltwiseId, 1); // connect to a second input - int inpSize[] = {1, 2, 3}; - Mat firstInp(3, &inpSize[0], get<0>(GetParam())); - Mat secondInp(3, &inpSize[0], get<1>(GetParam())); + int inpSize[] = {1, 2, 3, 4}; + Mat firstInp(4, &inpSize[0], get<0>(GetParam())); + Mat secondInp(4, &inpSize[0], get<1>(GetParam())); randu(firstInp, 0, 255); randu(secondInp, 0, 255); @@ -1028,15 +1071,20 @@ TEST_P(Test_DLDT_two_inputs, as_backend) net.setInput(firstInp, "data", kScale); net.setInput(secondInp, "second_input", kScaleInv); net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE); + net.setPreferableTarget(targetId); Mat out = net.forward(); Mat ref; addWeighted(firstInp, kScale, secondInp, kScaleInv, 0, ref, CV_32F); - normAssert(out, ref); + // Output values are in range [0, 637.5]. + double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.06 : 1e-6; + double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 
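The test_layers.cpp changes above all follow a single pattern: a Google Test fixture parameterized on Target, instantiated with whatever Inference Engine targets are actually available on the machine, so a CPU-only CI host and a Myriad-equipped host run the same test list. A minimal sketch of that pattern; the fixture name and model files are hypothetical, while getAvailableTargets is the helper the patch itself uses:

    // Hypothetical fixture illustrating the per-target parameterization above.
    typedef testing::TestWithParam<Target> MyIETest;
    TEST_P(MyIETest, Smoke)
    {
        Target targetId = GetParam();  // e.g. DNN_TARGET_CPU, _OPENCL_FP16, _MYRIAD
        Net net = readNet("model.xml", "model.bin");  // hypothetical IR pair
        net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(targetId);
    }
    INSTANTIATE_TEST_CASE_P(/**/, MyIETest,
        testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)));

The relaxed tolerances in as_backend follow from the stated output range: an l1 of 0.06 against values up to 637.5 is roughly a 1e-4 relative error, consistent with FP16 arithmetic on DNN_TARGET_OPENCL_FP16 and DNN_TARGET_MYRIAD, while full-precision targets keep the strict 1e-6/1e-5 bounds.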
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index c640c90ed3..7fa0dc47ef 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -136,7 +136,7 @@ TEST_P(Test_Torch_layers, run_reshape_change_batch_size)
 
 TEST_P(Test_Torch_layers, run_reshape)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("Test is disabled for OpenVINO 2018R4");
 #endif
@@ -172,7 +172,7 @@ TEST_P(Test_Torch_layers, run_depth_concat)
 
 TEST_P(Test_Torch_layers, run_deconv)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("Test is disabled for OpenVINO 2018R4");
 #endif
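The op_inf_engine change at the top of the patch is what the new Myriad expectations rely on: a default-constructed backend net keeps reporting its preset precision (FP32 by default), but once hasNetOwner is set, getPrecision() forwards to the CNNNetwork loaded from IR, so an FP32 model can be recognized and rejected by the FP16-only Myriad plugin. A minimal standalone sketch of the observable behavior; the file names and input shape are hypothetical:

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    int main()
    {
        // Hypothetical FP32 IR pair produced by the Model Optimizer.
        Net net = readNet("model_fp32.xml", "model_fp32.bin");
        net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(DNN_TARGET_MYRIAD);  // Myriad executes FP16 IR only

        int sz[] = {1, 3, 224, 224};                 // hypothetical input shape
        net.setInput(Mat(4, sz, CV_32F, Scalar(0)));
        try { net.forward(); }                       // expected to throw, as the tests assert
        catch (const std::exception& e) { /* FP32 model rejected for Myriad */ }
        return 0;
    }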