Merge pull request #22957 from dkurt:new_openvino_api
Switch to new OpenVINO API after 2022.1 release

* Pass Layer_Test_Convolution_DLDT.Accuracy/0 test
* Pass test Test_Caffe_layers.Softmax
* Failed 136 tests
* Fix Concat. Failed 120 tests
* Custom nGraph ops. 19 failed tests
* Set and get properties from Core
* Read model from buffer
* Change MaxPooling layer output names. Restore reshape
* Cosmetic changes
* Cosmetic changes
* Override getOutputsInfo
* Fixes for OpenVINO < 2022.1
* Async inference for 2021.4 and less
* Compile model with config
* Fix serialize for 2022.1
* Asynchronous inference with 2022.1
* Handle 1d outputs
* Work with model with dynamic output shape
* Fixes with 1d output for old API
* Control outputs by nGraph function for all OpenVINO versions
* Refer inputs in PrePostProcessor by indices
* Fix cycled dependency between InfEngineNgraphNode and InfEngineNgraphNet. Add InferRequest callback only for async inference. Do not capture InferRequest object.
* Fix tests thresholds
* Fix HETERO:GPU,CPU plugin issues with unsupported layer
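Editor's note (not part of the patch): the whole diff below follows one pattern — the pre-2022.1 `InferenceEngine` calls are kept behind version guards while new `ov::` equivalents are added. A minimal sketch of how those APIs correspond, with `"model.xml"` and `"CPU"` as placeholder values:

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;                                                  // replaces InferenceEngine::Core
    std::shared_ptr<ov::Model> model = core.read_model("model.xml"); // replaces Core::ReadNetwork
    ov::CompiledModel compiled = core.compile_model(model, "CPU");   // replaces Core::LoadNetwork
    ov::InferRequest req = compiled.create_infer_request();          // replaces CreateInferRequest
    req.infer();                                                     // replaces InferRequest::Infer
    ov::Tensor out = req.get_output_tensor();                        // replaces InferRequest::GetBlob
    (void)out;
    return 0;
}
```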
This commit is contained in:
parent 9012e6dd9b
commit 8681686d8f
@@ -35,6 +35,7 @@ static bool DNN_IE_SERIALIZE = utils::getConfigurationParameterBool("OPENCV_DNN_
 static std::string kDefaultInpLayerName = "opencv_ngraph_empty_inp_layer_name";
 static constexpr const char* kOpenCVLayersType = "opencv_ngraph_layer";
 
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
 static std::string shapesToStr(const std::vector<Mat>& mats)
 {
     std::ostringstream shapes;
@@ -62,6 +63,7 @@ static void strToShapes(const std::string& str, std::vector<std::vector<size_t>
             ss >> shapes[i][j];
     }
 }
+#endif  // OpenVINO < 2022.1
 
 static std::vector<Ptr<NgraphBackendWrapper> >
 ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
@@ -76,6 +78,61 @@ ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
     return wrappers;
 }
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+
+class NgraphCustomOp: public ov::op::Op {
+public:
+    OPENVINO_OP(kOpenCVLayersType);
+
+    NgraphCustomOp(const ngraph::OutputVector& inputs, Ptr<Layer>& cvLayer, const std::vector<Mat>& outputs, const std::vector<Mat>& internals):
+        Op(inputs), cvLayer(cvLayer), outputs(outputs), internals(internals)
+    {
+        constructor_validate_and_infer_types();
+    }
+
+    void validate_and_infer_types() override
+    {
+        set_output_size(outputs.size());
+        for (int i = 0; i < outputs.size(); ++i)
+        {
+            ov::PartialShape shape;
+            for (int j = 0; j < outputs[i].dims; ++j) {
+                shape.push_back(outputs[i].size[j]);
+            }
+            set_output_type(i, get_input_element_type(0), shape);
+        }
+    }
+
+    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override
+    {
+        return std::make_shared<NgraphCustomOp>(new_args, cvLayer, outputs, internals);
+    }
+
+    bool has_evaluate() const {
+        return true;
+    }
+
+    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override {
+        std::vector<Mat> inpMats, outMats;
+        infEngineBlobsToMats(inputs, inpMats);
+        infEngineBlobsToMats(outputs, outMats);
+        try
+        {
+            cvLayer->forward(inpMats, outMats, internals);
+            return true;
+        }
+        catch (...)
+        {
+            return false;
+        }
+    }
+
+    Ptr<Layer>& cvLayer;
+    std::vector<Mat> outputs, internals;
+};
+
+#else
+
 class NgraphCustomOp: public ngraph::op::Op {
 public:
     const ngraph::NodeTypeInfo& get_type_info() const override
@@ -324,7 +381,7 @@ public:
 #endif
 };
 
+#endif  // OpenVINO >= 2022.1
 
 InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
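Editor's note: the new `NgraphCustomOp` above follows the custom-op contract of OpenVINO >= 2022.1. A minimal, self-contained sketch of that contract (the `AddOne` op here is hypothetical, purely for illustration):

```cpp
#include <openvino/op/op.hpp>

class AddOne : public ov::op::Op {
public:
    OPENVINO_OP("AddOne");
    AddOne() = default;
    AddOne(const ov::Output<ov::Node>& arg) : Op({arg}) { constructor_validate_and_infer_types(); }

    // Propagate the input type/shape to the single output.
    void validate_and_infer_types() override {
        set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
    }
    std::shared_ptr<ov::Node> clone_with_new_inputs(const ov::OutputVector& args) const override {
        return std::make_shared<AddOne>(args.at(0));
    }
    // Declaring evaluate() lets the runtime execute the op without a plugin kernel,
    // which is exactly how the OpenCV fallback layers run under the new API.
    bool has_evaluate() const override { return true; }
    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override {
        const float* src = inputs[0].data<float>();
        float* dst = outputs[0].data<float>();
        for (size_t i = 0; i < inputs[0].get_size(); ++i)
            dst[i] = src[i] + 1.0f;
        return true;
    }
};
```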
@@ -337,15 +394,6 @@ InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& n
                                          std::vector<Mat>& outputs, std::vector<Mat>& internals)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), cvLayer(cvLayer_)
 {
-    std::ostringstream oss;
-    oss << (size_t)cvLayer.get();
-
-    std::map<std::string, InferenceEngine::Parameter> params = {
-        {"impl", oss.str()},
-        {"outputs", shapesToStr(outputs)},
-        {"internals", shapesToStr(internals)}
-    };
-
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_3)
     ngraph::OutputVector inp_nodes;
 #else
@@ -353,7 +401,19 @@ InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& n
 #endif
     for (const auto& node : nodes)
         inp_nodes.emplace_back(node.dynamicCast<InfEngineNgraphNode>()->node);
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    node = std::make_shared<NgraphCustomOp>(inp_nodes, cvLayer, outputs, internals);
+#else
+    std::ostringstream oss;
+    oss << (size_t)cvLayer.get();
+    std::map<std::string, InferenceEngine::Parameter> params = {
+        {"impl", oss.str()},
+        {"outputs", shapesToStr(outputs)},
+        {"internals", shapesToStr(internals)}
+    };
     node = std::make_shared<NgraphCustomOp>(inp_nodes, params);
+#endif
 
     CV_Assert(!cvLayer->name.empty());
     setName(cvLayer->name);
@@ -383,7 +443,7 @@ void InfEngineNgraphNet::addOutput(const Ptr<InfEngineNgraphNode>& node)
     CV_Assert(node);
     CV_Assert(node->node);
     const std::string& name = node->node->get_friendly_name();
-    requestedOutputs.insert({name, node});
+    requestedOutputs.insert({name, node.get()});
 }
 
 void InfEngineNgraphNet::setNodePtr(std::shared_ptr<ngraph::Node>* ptr) {
@@ -457,6 +517,9 @@ void InfEngineNgraphNet::createNet(Target targetId) {
         CV_LOG_DEBUG(NULL, "DNN/NGRAPH: Add 'Result' output: " << output_node_it->first);
         CV_Assert(output_node_it->second);
         auto out = std::make_shared<ngraph::op::Result>(output_node_it->second->node);
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+        out->set_friendly_name(output_node_it->first + (output_node_it->second->node->get_output_size() == 1 ? "" : ".0"));
+#endif
         outs.push_back(out);
     }
     CV_Assert_N(!inputs_vec.empty(), !outs.empty());
@@ -504,12 +567,20 @@ void InfEngineNgraphNet::createNet(Target targetId) {
     }
 }
 
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
+static inline
+InferenceEngine::Layout estimateLayout(size_t dims);
+#endif
+
 void InfEngineNgraphNet::init(Target targetId)
 {
     if (!hasNetOwner)
     {
         if (targetId == DNN_TARGET_OPENCL_FP16)
         {
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+            ov::pass::ConvertFP32ToFP16().run_on_model(ngraph_function);
+#else
             auto nodes = ngraph_function->get_ordered_ops();
             for (auto& node : nodes)
             {
@@ -533,6 +604,7 @@ void InfEngineNgraphNet::init(Target targetId)
                 }
             }
             ngraph_function->validate_nodes_and_infer_types();
+#endif  // OpenVINO >= 2022.1
         }
         cnn = InferenceEngine::CNNNetwork(ngraph_function);
 
@@ -580,20 +652,45 @@ void InfEngineNgraphNet::init(Target targetId)
         CV_Error(Error::StsNotImplemented, "Unknown target");
     };
 
-    if (!hasNetOwner) {
-        for (size_t i = 0; i < ngraph_function->get_output_size(); ++i) {
-            auto node = ngraph_function->output(i).get_node();
-            for (size_t j = 0; j < node->get_input_size(); ++j) {
-                std::string name = node->input_value(j).get_node()->get_friendly_name();
-                auto iter = requestedOutputs.find(name);
-                if (iter != requestedOutputs.end()) {
-                    requestedOutputs.erase(iter);
-                    cnn.addOutput(name);
-                }
-            }
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    auto model = cnn.getFunction();
+    ov::preprocess::PrePostProcessor ppp(model);
+    int i = 0;
+    for (const auto& inp : model->inputs()) {  // TODO: not sure why but ngraph_function->inputs() here causes segfault.
+        const std::string& name = inp.get_node()->get_friendly_name();
+        auto blobIt = allBlobs.find(name);
+        CV_Assert(blobIt != allBlobs.end());
+
+        auto srcT = blobIt->second.get_element_type();
+        if (srcT != inp.get_node()->get_element_type()) {
+            ppp.input(i++).tensor().set_element_type(srcT);
         }
     }
+
+    i = 0;
+    for (const auto& it : model->outputs())
+    {
+        const std::string& name = it.get_node()->get_friendly_name();
+        auto blobIt = allBlobs.find(name);
+        CV_Assert(blobIt != allBlobs.end());
+        const auto& src = blobIt->second;
+
+        // A workaround for single dimension output for which OpenCV allocates 2d Mat.
+        // For example, face-detection-0105 with Result of shape {200} while output blob is {200, 1}
+        auto outShape = it.get_partial_shape().get_max_shape();
+        if (outShape != src.get_shape()) {
+            size_t sz = std::accumulate(outShape.begin(), outShape.end(), 1, std::multiplies<size_t>());
+            CV_Assert(sz == src.get_size());
+            allBlobs[name] = ov::Tensor(src.get_element_type(), outShape, src.data());
+        }
+
+        ppp.output(i++).tensor().set_element_type(ov::element::f32);  // Should be always FP32
+    }
+
+    ppp.build();
+
+#else
+
     for (const auto& it : cnn.getInputsInfo())
     {
         const std::string& name = it.first;
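Editor's note: `ov::preprocess::PrePostProcessor` is the 2022.1 replacement for mutating `InputInfo`/`OutputInfo` precisions. A minimal sketch of the pattern the hunk above uses (indices refer to input/output order; `build()` returns the modified model):

```cpp
#include <openvino/openvino.hpp>

std::shared_ptr<ov::Model> forceTensorTypes(std::shared_ptr<ov::Model> model) {
    ov::preprocess::PrePostProcessor ppp(model);
    ppp.input(0).tensor().set_element_type(ov::element::u8);    // host input tensor is U8
    ppp.output(0).tensor().set_element_type(ov::element::f32);  // outputs always read as FP32
    return ppp.build();  // inserts the needed Convert nodes into the model
}
```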
@@ -607,8 +704,16 @@ void InfEngineNgraphNet::init(Target targetId)
         const std::string& name = it.first;
         auto blobIt = allBlobs.find(name);
         CV_Assert(blobIt != allBlobs.end());
+        InferenceEngine::TensorDesc& desc = blobIt->second->getTensorDesc();
+
+        auto outShape = it.second->getDims();
+        if (outShape != desc.getDims()) {
+            desc.reshape(outShape, estimateLayout(outShape.size()));
+        }
+
         it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());  // Should be always FP32
     }
+#endif  // OpenVINO >= 2022.1
 
     initPlugin(cnn);
 }
@@ -660,6 +765,9 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
         const std::string& libName = candidates[i];
         try
         {
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+            ie.add_extension(libName);
+#else
             InferenceEngine::IExtensionPtr extension =
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
                 std::make_shared<InferenceEngine::Extension>(libName);
@@ -668,6 +776,7 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
 #endif
 
             ie.AddExtension(extension, "CPU");
+#endif
             CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
             found = true;
             break;
@@ -678,6 +787,7 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
     {
         CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter");
     }
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
     // Some of networks can work without a library of extra layers.
     // OpenCV fallbacks as extensions.
     try
@@ -688,12 +798,17 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
     {
         CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers nGraph extension: " << e.what());
     }
+#endif  // OpenVINO < 2022.1
 #ifndef _WIN32
     // Limit the number of CPU threads.
     if (device_name == "CPU")
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+        ie.set_property(device_name, ov::inference_num_threads(getNumThreads()));
+#else
         ie.SetConfig({{
             InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
         }}, device_name);
+#endif  // OpenVINO >= 2022.1
 #endif
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_2)
     if (device_name.find("GPU") == 0)
@@ -706,9 +821,13 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
         if (!cache_path.empty() && cache_path != "disabled")
         {
             CV_LOG_INFO(NULL, "OpenCV/nGraph: using GPU kernels cache: " << cache_path);
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+            ie.set_property(device_name, ov::cache_dir(cache_path));
+#else
             ie.SetConfig({{
                 InferenceEngine::PluginConfigParams::KEY_CACHE_DIR, cache_path,
             }}, device_name);
+#endif  // OpenVINO >= 2022.1
         }
     }
 #endif
@@ -716,9 +835,9 @@ void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
     std::map<std::string, std::string> config;
     if (device_name == "MYRIAD" || device_name == "HDDL") {
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
-        config.emplace("MYRIAD_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
+        config.emplace("MYRIAD_DETECT_NETWORK_BATCH", "NO");
 #else
-        config.emplace("VPU_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
+        config.emplace("VPU_DETECT_NETWORK_BATCH", "NO");
 #endif
     }
 
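Editor's note: the "Set and get properties from Core" commit swaps string-keyed `SetConfig` calls for the typed properties shown above. A minimal sketch (the thread count and cache path here are placeholder values):

```cpp
#include <openvino/openvino.hpp>

void configure(ov::Core& core) {
    core.set_property("CPU", ov::inference_num_threads(8));    // was KEY_CPU_THREADS_NUM
    core.set_property("GPU", ov::cache_dir("/tmp/ov_cache"));  // was KEY_CACHE_DIR
}
```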
@@ -757,16 +876,17 @@ bool NgraphBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
                                          std::vector<MatShape> &outputs,
                                          std::vector<MatShape> &internals) const
 {
-    InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
-    InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
+    auto ngraphFunction = t_net.getFunction();
     bool equal_flag = true;
-    size_t i = 0;
-    for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
+    std::map<std::string, std::vector<size_t> > inShapes;
+    int i = 0;
+    for (const auto& inp : ngraphFunction->get_parameters())
     {
-        InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
-        if (itr->second != currentInShape)
+        std::vector<size_t> oldShape = inp->get_shape();
+        std::vector<size_t> newShape(inputs[i].begin(), inputs[i].end());
+        inShapes.insert({inp->get_friendly_name(), newShape});
+        if (oldShape != newShape)
         {
-            itr->second = currentInShape;
             equal_flag = false;
         }
         i++;
@@ -777,7 +897,18 @@ bool NgraphBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
         InferenceEngine::CNNNetwork curr_t_net(t_net);
         curr_t_net.reshape(inShapes);
     }
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    std::vector<size_t> dims;
+    for (const auto& it : ngraphFunction->outputs()) {
+        if (it.get_node()->get_friendly_name() == name) {
+            dims = it.get_partial_shape().get_max_shape();
+        }
+    }
+    if (dims.empty())
+        CV_Error(Error::StsError, format("Unable to find result with name %s", name.c_str()));
+#else
     std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
+#endif
     outputs.push_back(MatShape(dims.begin(), dims.end()));
     return false;
 }
@@ -795,6 +926,21 @@ void NgraphBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays
     CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
 }
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+
+ov::Tensor wrapToNgraphBlob(const Mat& m) {
+    std::vector<size_t> shape = getShape<size_t>(m);
+    if (m.type() == CV_32F)
+        return ov::Tensor(ov::element::f32, shape, m.data);
+    else if (m.type() == CV_8U)
+        return ov::Tensor(ov::element::u8, shape, m.data);
+    else if (m.type() == CV_32SC1)
+        return ov::Tensor(ov::element::i32, shape, m.data);
+    else
+        CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
+}
+
+#else
+
 static InferenceEngine::Layout estimateLayout(int dims)
 {
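Editor's note: the new `wrapToNgraphBlob` above relies on the `ov::Tensor` constructor that adopts external host memory. A minimal sketch of that zero-copy wrapping under the assumption of a continuous FP32 `cv::Mat` (the tensor does not own the buffer, so the `Mat` must outlive it):

```cpp
#include <opencv2/core.hpp>
#include <openvino/runtime/tensor.hpp>

ov::Tensor wrapMat(const cv::Mat& m) {
    CV_Assert(m.type() == CV_32F && m.isContinuous());
    std::vector<size_t> shape(m.size.p, m.size.p + m.dims);
    return ov::Tensor(ov::element::f32, shape, m.data);  // no copy; shares m.data
}
```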
@@ -823,19 +969,6 @@ InferenceEngine::Layout estimateLayout(const Mat& m)
     return estimateLayout(m.dims);
 }
 
-static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
-{
-    std::vector<size_t> shape = getShape<size_t>(m);
-    if (m.type() == CV_32F)
-        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
-               {InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
-    else if (m.type() == CV_8U)
-        return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
-               {InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
-    else
-        CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
-}
-
 InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, const std::vector<size_t>& shape,
                                             InferenceEngine::Layout layout)
 {
@@ -845,6 +978,9 @@ InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, const std::vector<size
     else if (m.type() == CV_8U)
         return InferenceEngine::make_shared_blob<uint8_t>(
                {InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
+    else if (m.type() == CV_32SC1)
+        return InferenceEngine::make_shared_blob<int32_t>(
+               {InferenceEngine::Precision::I32, shape, layout}, (int32_t*)m.data);
     else
         CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
 }
@@ -855,12 +991,15 @@ InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, InferenceEngine::Layou
     return wrapToNgraphBlob(m, shape, layout);
 }
 
+InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m) { return wrapToNgraphBlob(m, estimateLayout(m)); }
+
+#endif  // OpenVINO >= 2022.1
+
 NgraphBackendWrapper::NgraphBackendWrapper(int targetId, const cv::Mat& m)
     : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, targetId)
     , host((Mat*)&m)
 {
-    dataPtr = wrapToInfEngineDataNode(m);
-    blob = wrapToNgraphBlob(m, estimateLayout(m));
+    blob = wrapToNgraphBlob(m);
 }
 
 NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper)
@@ -868,8 +1007,7 @@ NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper)
 {
     Ptr<NgraphBackendWrapper> ieWrapper = wrapper.dynamicCast<NgraphBackendWrapper>();
     CV_Assert(!ieWrapper.empty());
-    InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;
-    dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
+    name = ieWrapper->name;
     blob = ieWrapper->blob;
 }
 
@@ -895,6 +1033,12 @@ void NgraphBackendWrapper::setHostDirty()
     //CV_Error(Error::StsNotImplemented, "");
 }
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ov::Tensor copyBlob(const ov::Tensor& blob)
+{
+    return ov::Tensor(blob.get_element_type(), blob.get_shape());
+}
+#else
 InferenceEngine::Blob::Ptr copyBlob(const InferenceEngine::Blob::Ptr& blob)
 {
     InferenceEngine::Blob::Ptr copy;
@@ -918,88 +1062,13 @@ InferenceEngine::Blob::Ptr copyBlob(const InferenceEngine::Blob::Ptr& blob)
     return copy;
 }
 
-InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr)
-{
-    CV_Assert(!ptr.empty());
-    Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
-    CV_Assert(!p.empty());
-    return p->dataPtr;
-}
-
-static
-InferenceEngine::Blob::Ptr reallocateBlob(Mat &m, const InferenceEngine::TensorDesc& description)
-{
-    auto dims = description.getDims();
-    auto layout = estimateLayout(dims.size());
-    MatShape matShape(dims.begin(), dims.end());
-    if (description.getPrecision() == InferenceEngine::Precision::FP32)
-    {
-        m.create(matShape, CV_32FC1);
-        return InferenceEngine::make_shared_blob<float>(
-               {description.getPrecision(), dims, layout}, (float*)m.data);
-    }
-    else if (description.getPrecision() == InferenceEngine::Precision::I32)
-    {
-        m.create(matShape, CV_32SC1);
-        return InferenceEngine::make_shared_blob<int>(
-               {description.getPrecision(), dims, layout}, (int*)m.data);
-    }
-    else if (description.getPrecision() == InferenceEngine::Precision::U8)
-    {
-        m.create(matShape, CV_8UC1);
-        return InferenceEngine::make_shared_blob<uchar>(
-               {description.getPrecision(), dims, layout}, (uchar*)m.data);
-    }
-    std::ostringstream msg;
-    msg << "Unsupported IE precision: " << description.getPrecision();
-    CV_Error(Error::StsNotImplemented, msg.str());
-}
-
-InferenceEngine::DataPtr ngraphDataOutputNode(
-        const Ptr<BackendWrapper>& ptr,
-        const InferenceEngine::TensorDesc& description,
-        const std::string name)
-{
-    CV_Assert(!ptr.empty());
-    Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
-    CV_Assert(!p.empty());
-    NgraphBackendWrapper& w = *p;
-    const InferenceEngine::TensorDesc& blobDesc = w.blob.get()->getTensorDesc();
-    auto dims = description.getDims();
-    bool reallocate = false;
-    if (blobDesc.getPrecision() != description.getPrecision())
-    {
-        reallocate = true;
-        CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong precision: " << blobDesc.getPrecision() << " => " << description.getPrecision() << " ndims=" << dims.size());
-    }
-    if (dims.size() != blobDesc.getDims().size())
-    {
-        reallocate = true;
-        CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong dims: " << blobDesc.getDims().size() << " => " << dims.size());
-    }
-    if (reallocate)
-    {
-        auto layout = estimateLayout(dims.size());
-        w.dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(name,
-                    {description.getPrecision(), dims, layout}));
-        w.blob = reallocateBlob(*w.host, description);
-    }
-    return w.dataPtr;
-}
-
+#endif  // OpenVINO < 2022.1
 
 void InfEngineNgraphNet::reset()
 {
     allBlobs.clear();
     infRequests.clear();
     isInit = false;
-
-    outputsDesc.clear();
-    for (const auto& it : cnn.getOutputsInfo())
-    {
-        const std::string& name = it.first;
-        outputsDesc.insert({name, it.second->getTensorDesc()});
-    }
 }
 
 void InfEngineNgraphNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
@@ -1007,7 +1076,7 @@ void InfEngineNgraphNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& p
     auto wrappers = ngraphWrappers(ptrs);
     for (const auto& wrapper : wrappers)
    {
-        std::string name = wrapper->dataPtr->getName();
+        std::string name = wrapper->name;
         name = name.empty() ? kDefaultInpLayerName : name;
         allBlobs.insert({name, wrapper->blob});
     }
@@ -1022,27 +1091,10 @@ void InfEngineNgraphNet::NgraphReqWrapper::makePromises(const std::vector<Ptr<Ba
     for (int i = 0; i < outs.size(); ++i)
     {
         outs[i]->futureMat = outProms[i].getArrayResult();
-        outsNames[i] = outs[i]->dataPtr->getName();
+        outsNames[i] = outs[i]->name;
     }
 }
 
-Mat ngraphBlobToMat(const InferenceEngine::Blob::Ptr& blob)
-{
-    std::vector<size_t> dims = blob->getTensorDesc().getDims();
-    std::vector<int> size(dims.begin(), dims.end());
-    auto precision = blob->getTensorDesc().getPrecision();
-
-    int type = -1;
-    switch (precision)
-    {
-        case InferenceEngine::Precision::FP32: type = CV_32F; break;
-        case InferenceEngine::Precision::U8: type = CV_8U; break;
-        default:
-            CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
-    }
-    return Mat(size, type, (void*)blob->buffer());
-}
-
 void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync)
 {
     CV_LOG_DEBUG(NULL, "InfEngineNgraphNet::forward(" << (isAsync ? "async" : "sync") << ")");
@@ -1070,6 +1122,25 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
         }
         infRequests.push_back(reqWrapper);
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+        int i = 0;
+        for (const auto& it : netExec.inputs())
+        {
+            const std::string& name = it.get_node()->get_friendly_name();
+            auto blobIt = allBlobs.find(name);
+            CV_Assert(blobIt != allBlobs.end());
+            reqWrapper->req.set_input_tensor(i++, isAsync ? copyBlob(blobIt->second) : blobIt->second);
+        }
+
+        i = 0;
+        for (const auto& it : netExec.outputs())
+        {
+            const std::string& name = it.get_node()->get_friendly_name();
+            auto blobIt = allBlobs.find(name);
+            CV_Assert(blobIt != allBlobs.end());
+            reqWrapper->req.set_output_tensor(i++, isAsync ? copyBlob(blobIt->second) : blobIt->second);
+        }
+#else
         InferenceEngine::BlobMap inpBlobs, outBlobs;
         for (const auto& it : cnn.getInputsInfo())
         {
@@ -1087,6 +1158,53 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
         }
         reqWrapper->req.SetInput(inpBlobs);
         reqWrapper->req.SetOutput(outBlobs);
+#endif
+
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+        if (isAsync) {
+            bool* isReady = &reqWrapper->isReady;
+            auto* promises = &reqWrapper->outProms;
+            auto* req = &reqWrapper->req;
+            reqWrapper->req.set_callback([isReady, promises, req](std::exception_ptr ex) {
+                CV_LOG_DEBUG(NULL, "DNN(nGraph): completionCallback()");
+
+                size_t processedOutputs = 0;
+                try
+                {
+                    for (; processedOutputs < promises->size(); ++processedOutputs)
+                    {
+                        Mat m = infEngineBlobToMat(req->get_output_tensor(processedOutputs));
+
+                        try
+                        {
+                            (*promises)[processedOutputs].setValue(m.clone());
+                        }
+                        catch (...)
+                        {
+                            try {
+                                (*promises)[processedOutputs].setException(std::current_exception());
+                            } catch(...) {
+                                CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
+                            }
+                        }
+                    }
+                }
+                catch (...)
+                {
+                    std::exception_ptr e = std::current_exception();
+                    for (; processedOutputs < promises->size(); ++processedOutputs)
+                    {
+                        try {
+                            (*promises)[processedOutputs].setException(e);
+                        } catch(...) {
+                            CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
+                        }
+                    }
+                }
+                *isReady = true;
+            });
+        }
+#else  // OpenVINO >= 2022.1
+
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
         InferenceEngine::InferRequest infRequest = reqWrapper->req;
@@ -1125,7 +1243,7 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
         for (; processedOutputs < wrapper.outProms.size(); ++processedOutputs)
         {
             const std::string& name = wrapper.outsNames[processedOutputs];
-            Mat m = ngraphBlobToMat(wrapper.req.GetBlob(name));
+            Mat m = infEngineBlobToMat(wrapper.req.GetBlob(name));
 
             try
             {
@@ -1157,8 +1275,34 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
                 wrapper.isReady = true;
             }
         );
+#endif  // OpenVINO >= 2022.1
     }
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    if (isAsync)
+    {
+        // Copy actual data to infer request's input blobs.
+        int i = 0;
+        for (const auto& it : cnn.getFunction()->get_parameters())
+        {
+            const std::string& name = it->get_friendly_name();
+            auto blobIt = allBlobs.find(name);
+            Mat srcMat = infEngineBlobToMat(blobIt->second);
+            Mat dstMat = infEngineBlobToMat(reqWrapper->req.get_input_tensor(i++));
+            srcMat.copyTo(dstMat);
+        }
+
+        // Set promises to output blobs wrappers.
+        reqWrapper->makePromises(outBlobsWrappers);
+
+        reqWrapper->isReady = false;
+        reqWrapper->req.start_async();
+    }
+    else
+    {
+        reqWrapper->req.infer();
+    }
+#else
     if (isAsync)
     {
         // Copy actual data to infer request's input blobs.
@@ -1166,8 +1310,8 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
         {
             const std::string& name = it.first;
             auto blobIt = allBlobs.find(name);
-            Mat srcMat = ngraphBlobToMat(blobIt->second);
-            Mat dstMat = ngraphBlobToMat(reqWrapper->req.GetBlob(name));
+            Mat srcMat = infEngineBlobToMat(blobIt->second);
+            Mat dstMat = infEngineBlobToMat(reqWrapper->req.GetBlob(name));
             srcMat.copyTo(dstMat);
         }
 
@@ -1181,6 +1325,7 @@ void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlo
     {
         reqWrapper->req.Infer();
     }
+#endif  // OpenVINO >= 2022.1
 }
 
 #endif
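Editor's note: the 2022.1 async path above registers the completion callback before `start_async()` and deliberately captures raw pointers rather than the `ov::InferRequest` itself (per the commit message, capturing the request creates a reference cycle). A minimal sketch of that pattern:

```cpp
#include <openvino/openvino.hpp>

void runAsync(ov::InferRequest& req) {
    auto* reqPtr = &req;  // capture a pointer, not the request object itself
    req.set_callback([reqPtr](std::exception_ptr ex) {
        if (!ex) {
            ov::Tensor out = reqPtr->get_output_tensor(0);  // results are ready here
            (void)out;
        }
    });
    req.start_async();
    req.wait();  // blocks until the callback has run
}
```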
@@ -68,7 +68,11 @@ public:
     std::unordered_map<std::string, std::shared_ptr<ngraph::Node>* > all_nodes;
 
     InferenceEngine::ExecutableNetwork netExec;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    std::map<std::string, ov::Tensor> allBlobs;
+#else
     InferenceEngine::BlobMap allBlobs;
+#endif
     std::string device_name;
     bool isInit = false;
 
@@ -87,9 +91,7 @@ public:
 
     InferenceEngine::CNNNetwork cnn;
     bool hasNetOwner;
-    std::unordered_map<std::string, Ptr<InfEngineNgraphNode> > requestedOutputs;
+    std::unordered_map<std::string, InfEngineNgraphNode*> requestedOutputs;
 
-    std::map<std::string, InferenceEngine::TensorDesc> outputsDesc;
 };
 
 class InfEngineNgraphNode : public BackendNode
@@ -123,17 +125,15 @@ public:
     virtual void setHostDirty() CV_OVERRIDE;
 
     Mat* host;
-    InferenceEngine::DataPtr dataPtr;
+    std::string name;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+    ov::Tensor blob;
+#else
     InferenceEngine::Blob::Ptr blob;
+#endif
     AsyncArray futureMat;
 };
 
-InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr);
-InferenceEngine::DataPtr ngraphDataOutputNode(
-        const Ptr<BackendWrapper>& ptr,
-        const InferenceEngine::TensorDesc& description,
-        const std::string name);
-
 // This is a fake class to run networks from Model Optimizer. Objects of that
 // class simulate responses of layers are imported by OpenCV and supported by
 // Inference Engine. The main difference is that they do not perform forward pass.
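Editor's note: the header now stores `ov::Tensor` where it previously stored `InferenceEngine::Blob::Ptr`. `ov::Tensor` is a value type with shared ownership, so the wrapper members can be copied cheaply. A minimal sketch of its basic use:

```cpp
#include <openvino/runtime/tensor.hpp>

void tensorBasics() {
    ov::Tensor t(ov::element::f32, ov::Shape{1, 3, 2, 2});  // owning allocation
    float* p = t.data<float>();
    for (size_t i = 0; i < t.get_size(); ++i)
        p[i] = 0.f;
    ov::Tensor alias = t;  // shares the same buffer, much like copying a Blob::Ptr did
    (void)alias;
}
```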
@@ -403,8 +403,7 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        InferenceEngine::DataPtr data = ngraphDataNode(inputs[0]);
-        const int numDims = data->getDims().size();
+        const int numDims = nodes[0].dynamicCast<InfEngineNgraphNode>()->node->get_shape().size();
         const int cAxis = normalize_axis(axis, numDims);
         std::vector<size_t> maxDims(numDims, 0);
 
@@ -412,16 +411,17 @@ public:
         ngraph::OutputVector inp_nodes;
         for (int i = 0; i < nodes.size(); ++i)
         {
-            inp_nodes.push_back(nodes[i].dynamicCast<InfEngineNgraphNode>()->node);
+            auto inp = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;
+            inp_nodes.push_back(inp);
 
-            std::vector<size_t> inpShape = ngraphDataNode(inputs[i])->getDims();
+            std::vector<size_t> inpShape = inp->get_shape();
             for (int i = 0; i < numDims; ++i)
                 maxDims[i] = std::max(maxDims[i], inpShape[i]);
         }
         for (int i = 0; i < inp_nodes.size(); ++i)
         {
             bool needPadding = false;
-            std::vector<size_t> inpShape = ngraphDataNode(inputs[i])->getDims();
+            std::vector<size_t> inpShape = inp_nodes[i].get_shape();
             std::vector<int64_t> begins(inpShape.size(), 0), ends(inpShape.size(), 0);
             for (int j = 0; j < inpShape.size(); ++j)
             {
@@ -6,6 +6,7 @@
 #include "layers_common.hpp"
 #include "../op_cuda.hpp"
 #include "../op_cann.hpp"
+#include "../ie_ngraph.hpp"
 
 #include <opencv2/dnn/shape_utils.hpp>
 
@@ -104,6 +105,12 @@ public:
         return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::DIV ||
                op == OPERATION::DIV || op == OPERATION::MAX || op == OPERATION::MIN;
 #endif
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return (op == OPERATION::ADD ||
+                    op == OPERATION::PROD ||
+                    op == OPERATION::GREATER_EQUAL ||
+                    op == OPERATION::LESS_EQUAL
+            );
         if (op == OPERATION::MAX || op == OPERATION::MIN || op == OPERATION::SUM ||
             op == OPERATION::PROD || op == OPERATION::DIV)
             return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
@@ -743,6 +750,37 @@ public:
         CV_Assert(inputs.size());
         return inputs.size() * total(outputs[0]);
     }
+
+#ifdef HAVE_DNN_NGRAPH
+    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        CV_Assert(inputs.size() == 2);
+        auto& inp0 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto& inp1 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
+
+        if (inp0->get_element_type() != inp1->get_element_type()) {
+            auto dtype = preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD ?
+                         ngraph::element::f16 : ngraph::element::f32;
+            if (inp0->get_element_type() != dtype)
+                inp0 = std::make_shared<ngraph::op::v0::Convert>(inp0, dtype);
+            if (inp1->get_element_type() != dtype)
+                inp1 = std::make_shared<ngraph::op::v0::Convert>(inp1, dtype);
+        }
+
+        std::shared_ptr<ngraph::Node> node;
+        if (op == OPERATION::ADD)
+            node = std::make_shared<ngraph::op::v1::Add>(inp0, inp1);
+        else if (op == OPERATION::PROD)
+            node = std::make_shared<ngraph::op::v1::Multiply>(inp0, inp1);
+        else if (op == OPERATION::GREATER_EQUAL)
+            node = std::make_shared<ngraph::op::v1::GreaterEqual>(inp0, inp1);
+        else if (op == OPERATION::LESS_EQUAL)
+            node = std::make_shared<ngraph::op::v1::LessEqual>(inp0, inp1);
+        else
+            CV_Error(Error::StsNotImplemented, "Operation is not implemented for nGraph backend");
+        return Ptr<BackendNode>(new InfEngineNgraphNode(node));
+    }
+#endif
 };
 
 Ptr<NaryEltwiseLayer> NaryEltwiseLayer::create(const LayerParams& params)
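Editor's note: `initNgraph` above builds the element-wise op as graph nodes, inserting `Convert` nodes first to align element types. A minimal self-contained sketch of the same node-building pattern, wrapped into a tiny standalone model:

```cpp
#include <openvino/openvino.hpp>

std::shared_ptr<ov::Model> addGraph() {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{1, 4});
    auto b32 = std::make_shared<ov::op::v0::Convert>(b, ov::element::f32);  // unify dtypes
    auto sum = std::make_shared<ov::op::v1::Add>(a, b32);
    return std::make_shared<ov::Model>(ov::OutputVector{sum}, ov::ParameterVector{a, b});
}
```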
@@ -401,6 +401,24 @@ public:
 #else
         ngraph::op::v4::Interpolate::InterpolateAttrs attrs;
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+        if (interpolation == "nearest") {
+            attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::NEAREST;
+            attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL;
+        } else if (interpolation == "bilinear") {
+            attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX;
+            attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC;
+        } else {
+            CV_Error(Error::StsNotImplemented, format("Unsupported interpolation: %s", interpolation.c_str()));
+        }
+        attrs.shape_calculation_mode = ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES;
+
+        if (alignCorners) {
+            attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS;
+        }
+
+        attrs.nearest_mode = ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR;
+#else
         if (interpolation == "nearest") {
             attrs.mode = ngraph::op::v4::Interpolate::InterpolateMode::nearest;
             attrs.coordinate_transformation_mode = ngraph::op::v4::Interpolate::CoordinateTransformMode::half_pixel;
@@ -417,6 +435,7 @@ public:
         }
 
         attrs.nearest_mode = ngraph::op::v4::Interpolate::NearestMode::round_prefer_floor;
+#endif  // OpenVINO >= 2022.1
 
         std::vector<int64_t> shape = {outHeight, outWidth};
         auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
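Editor's note: the only semantic change in this hunk is that 2022.1 renamed the `Interpolate` enum values to upper case (`nearest` -> `NEAREST`, `half_pixel` -> `HALF_PIXEL`, and so on); the logic is unchanged. A hypothetical helper showing how such a rename can be isolated behind the same version guard the patch uses:

```cpp
// Hypothetical helper, not in the patch: pick the right enum spelling per version.
static ngraph::op::v4::Interpolate::InterpolateMode nearestMode()
{
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
    return ngraph::op::v4::Interpolate::InterpolateMode::NEAREST;  // 2022.1 spelling
#else
    return ngraph::op::v4::Interpolate::InterpolateMode::nearest;  // pre-2022.1 spelling
#endif
}
```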
@ -275,19 +275,17 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
|
|||||||
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
|
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
|
||||||
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
||||||
{
|
{
|
||||||
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
|
|
||||||
std::string outputName = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
|
std::string outputName = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
|
||||||
outputName = ld.outputBlobsWrappers.size() > 1 ? (outputName + "." + std::to_string(i)) : outputName;
|
outputName = ld.outputBlobsWrappers.size() > 1 ? (outputName + "." + std::to_string(i)) : outputName;
|
||||||
dataPtr->setName(outputName);
|
ld.outputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = outputName;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
||||||
{
|
{
|
||||||
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
|
|
||||||
std::string outputName = ld.outputBlobsWrappers.size() > 1 ? (ld.name + "." + std::to_string(i)) : ld.name;
|
std::string outputName = ld.outputBlobsWrappers.size() > 1 ? (ld.name + "." + std::to_string(i)) : ld.name;
|
||||||
dataPtr->setName(outputName);
|
ld.outputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = outputName;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -311,26 +309,7 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
|
|||||||
{
|
{
|
||||||
for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
|
for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
|
||||||
{
|
{
|
||||||
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.inputBlobsWrappers[i]);
|
ld.inputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = netInputLayer->outNames[i];
|
||||||
dataPtr->setName(netInputLayer->outNames[i]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
|
|
||||||
{
|
|
||||||
auto it = ienet.outputsDesc.find(ld.name);
|
|
||||||
if (it != ienet.outputsDesc.end())
|
|
||||||
{
|
|
||||||
const InferenceEngine::TensorDesc& descriptor = it->second;
|
|
||||||
InferenceEngine::DataPtr dataPtr = ngraphDataOutputNode(ld.outputBlobsWrappers[i], descriptor, ld.name);
|
|
||||||
dataPtr->setName(ld.name);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
|
|
||||||
dataPtr->setName(ld.name);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ienet.addBlobs(ld.inputBlobsWrappers);
|
ienet.addBlobs(ld.inputBlobsWrappers);
|
||||||
@ -456,10 +435,10 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
|
|||||||
dynamicCast<NgraphBackendWrapper>();
|
dynamicCast<NgraphBackendWrapper>();
|
||||||
CV_Assert(!inpWrapper.empty());
|
CV_Assert(!inpWrapper.empty());
|
||||||
auto iter = std::find(inputNames.begin(), inputNames.end(),
|
auto iter = std::find(inputNames.begin(), inputNames.end(),
|
||||||
inpWrapper->dataPtr->getName());
|
inpWrapper->name);
|
||||||
if (iter == inputNames.end())
|
if (iter == inputNames.end())
|
||||||
{
|
{
|
||||||
inputNames.push_back(inpWrapper->dataPtr->getName());
|
inputNames.push_back(inpWrapper->name);
|
||||||
inputs.push_back(inpLd.outputBlobs[cons_inp]);
|
inputs.push_back(inpLd.outputBlobs[cons_inp]);
|
||||||
}
|
}
|
||||||
curr_pos = cons + 1;
|
curr_pos = cons + 1;
|
||||||
@ -505,7 +484,12 @@ void NetImplOpenVINO::initBackend(const std::vector<LayerPin>& blobsToKeep_)
|
|||||||
CV_LOG_DEBUG(NULL, "DNN/IE: bind output port " << lid << ":" << oid << " (" << ngraph_input_node->get_friendly_name() << ":" << ngraph_input_node->get_type_info().name << ")");
|
CV_LOG_DEBUG(NULL, "DNN/IE: bind output port " << lid << ":" << oid << " (" << ngraph_input_node->get_friendly_name() << ":" << ngraph_input_node->get_type_info().name << ")");
|
||||||
|
|
||||||
// Handle parameters from other subnets. Output port is not used in this case
|
// Handle parameters from other subnets. Output port is not used in this case
|
||||||
|
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
|
||||||
if ((ngraph::op::is_parameter(ngraph_input_node) || ngraph::op::is_constant(ngraph_input_node)) &&
|
if ((ngraph::op::is_parameter(ngraph_input_node) || ngraph::op::is_constant(ngraph_input_node)) &&
|
||||||
|
#else
|
||||||
|
if ((ngraph_input_node->is_parameter() || ngraph_input_node->is_constant()) &&
|
||||||
|
#endif
|
||||||
|
|
||||||
ngraph_input_node->get_output_size() == 1)
|
ngraph_input_node->get_output_size() == 1)
|
||||||
{
|
{
|
||||||
inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ngraph_input_node));
|
inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ngraph_input_node));
|
||||||
@ -702,14 +686,33 @@ Net NetImplOpenVINO::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork
|
|||||||
|
|
||||||
CV_TRACE_REGION("register_inputs");
|
CV_TRACE_REGION("register_inputs");
|
||||||
|
|
||||||
|
auto ngraphFunction = ieNet.getFunction();
|
||||||
|
CV_Assert(ngraphFunction);
|
||||||
|
|
||||||
std::vector<String> inputsNames;
|
std::vector<String> inputsNames;
|
||||||
std::vector<MatShape> inp_shapes;
|
std::vector<MatShape> inp_shapes;
|
||||||
for (auto& it : ieNet.getInputsInfo())
|
for (auto& it : ngraphFunction->get_parameters())
|
||||||
{
|
{
|
||||||
inputsNames.push_back(it.first);
|
inputsNames.push_back(it->get_friendly_name());
|
||||||
std::vector<size_t> dims = it.second->getTensorDesc().getDims();
|
std::vector<size_t> dims = it->get_shape();
|
||||||
inp_shapes.push_back(std::vector<int>(dims.begin(), dims.end()));
|
inp_shapes.push_back(std::vector<int>(dims.begin(), dims.end()));
|
||||||
}
|
}
|
||||||
|
// nGraph models produce output "Result" layers which have "/sink_port" suffix in their names.
|
||||||
|
// Their inputs are actual model outputs and we change friendly name to it.
|
||||||
|
// By this workaround, we produce similar outputs names comparing to ieNet.getOutputsInfo()
|
||||||
|
for (int i = 0; i < ngraphFunction->get_output_size(); ++i) {
|
||||||
|
auto res = ngraphFunction->output(i);
|
||||||
|
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
|
||||||
|
const std::string& name = res.get_any_name();
|
||||||
|
#else
|
||||||
|
auto out = res.get_node()->input(0).get_source_output();
|
||||||
|
std::string name = out.get_node()->get_friendly_name();
|
||||||
|
if (out.get_node()->get_output_size() > 1)
|
||||||
|
name += "." + std::to_string(out.get_index());
|
||||||
|
#endif
|
||||||
|
if (res.get_node()->get_friendly_name() != name)
|
||||||
|
res.get_node()->set_friendly_name(name);
|
||||||
|
}
|
||||||
|
|
||||||
Net cvNet;
|
Net cvNet;
|
||||||
Ptr<NetImplOpenVINO> openvino_impl_ptr = makePtr<NetImplOpenVINO>();
|
Ptr<NetImplOpenVINO> openvino_impl_ptr = makePtr<NetImplOpenVINO>();
|
||||||
@@ -736,17 +739,15 @@ Net NetImplOpenVINO::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork

 CV_TRACE_REGION_NEXT("register_outputs");

-auto ngraphFunction = ieNet.getFunction();
-CV_Assert(ngraphFunction);
 std::vector<std::shared_ptr<ngraph::Node>> ngraphOperations = ngraphFunction->get_ops();

-for (auto& it : ieNet.getOutputsInfo())
+for (auto& it : ngraphFunction->get_results())
 {
 CV_TRACE_REGION("output");
-const auto& outputName = it.first;
+const auto& outputName = it->get_friendly_name();

 LayerParams lp;
-int lid = cvNet.addLayer(it.first, "", lp);
+int lid = cvNet.addLayer(outputName, "", lp);

 LayerData& ld = openvino_impl.layers[lid];

@@ -835,10 +836,15 @@ Net openvino_readNetwork(
 InferenceEngine::CNNNetwork ieNet;
 try
 {
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ov::Tensor weights_blob(ov::element::u8, {bufferWeightsSize}, (void*)bufferWeightsPtr);
+ieNet = ie.read_model(model, weights_blob);
+#else
 InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
 InferenceEngine::Blob::CPtr weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, (uint8_t*)bufferWeightsPtr, bufferWeightsSize);

 ieNet = ie.ReadNetwork(model, weights_blob);
+#endif
 }
 catch (const std::exception& e)
 {
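A sketch of the new in-memory loading path (the buffer names are hypothetical): the XML is passed as a std::string and the weights as a u8 ov::Tensor that merely wraps the caller's buffer, mirroring the old make_shared_blob call:

#include <openvino/openvino.hpp>
#include <cstdint>
#include <string>
#include <vector>

std::shared_ptr<ov::Model> readFromBuffers(const std::string& xmlText,
                                           std::vector<uint8_t>& weightsData)
{
    ov::Core core;
    // The tensor aliases weightsData rather than copying it, so the buffer
    // must stay alive at least until read_model() returns.
    ov::Tensor weights(ov::element::u8, {weightsData.size()}, weightsData.data());
    return core.read_model(xmlText, weights);
}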
@@ -39,6 +39,86 @@ cv::String setInferenceEngineBackendType(const cv::String& newBackendType)

 CV__DNN_INLINE_NS_END

+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+namespace InferenceEngine {
+
+CNNNetwork::CNNNetwork() {}
+
+CNNNetwork::CNNNetwork(std::shared_ptr<ov::Model> model) : model(model) {}
+
+std::shared_ptr<ov::Model> CNNNetwork::getFunction() const {
+return model;
+}
+
+void CNNNetwork::serialize(const std::string& xmlPath, const std::string& binPath) {
+ov::pass::Serialize(xmlPath, binPath).run_on_model(model);
+}
+
+void CNNNetwork::reshape(const std::map<std::string, std::vector<size_t> >& shapes) {
+std::map<std::string, ov::PartialShape> partialShapes;
+for (const auto& it : shapes) {
+ov::PartialShape shape;
+shape.insert(shape.begin(), it.second.begin(), it.second.end());
+partialShapes.insert({it.first, shape});
+}
+model->reshape(partialShapes);
+}
+
+std::vector<std::string> Core::GetAvailableDevices() {
+return get_available_devices();
+}
+
+void Core::UnregisterPlugin(const std::string& id) {
+unload_plugin(id);
+}
+
+CNNNetwork Core::ReadNetwork(const std::string& xmlPath, const std::string& binPath) {
+return read_model(xmlPath, binPath);
+}
+
+ExecutableNetwork Core::LoadNetwork(CNNNetwork net, const std::string& device,
+const std::map<std::string, std::string>& config) {
+ov::AnyMap props;
+for (const auto& it : config) {
+props.insert(it);
+}
+return compile_model(net.getFunction(), device, props);
+}
+
+ExecutableNetwork::ExecutableNetwork() {}
+
+ExecutableNetwork::ExecutableNetwork(const ov::CompiledModel& copy) : CompiledModel(copy) {}
+
+ov::InferRequest ExecutableNetwork::CreateInferRequest() { return create_infer_request(); }
+
+} // namespace InferenceEngine
+
+Mat infEngineBlobToMat(const ov::Tensor& blob)
+{
+std::vector<size_t> dims = blob.get_shape();
+std::vector<int> size(dims.begin(), dims.end());
+auto precision = blob.get_element_type();
+
+int type = -1;
+switch (precision)
+{
+case ov::element::f32: type = CV_32F; break;
+case ov::element::u8: type = CV_8U; break;
+default:
+CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
+}
+return Mat(size, type, blob.data());
+}
+
+void infEngineBlobsToMats(const ov::TensorVector& blobs,
+std::vector<Mat>& mats)
+{
+mats.resize(blobs.size());
+for (int i = 0; i < blobs.size(); ++i)
+mats[i] = infEngineBlobToMat(blobs[i]);
+}
+
+#else

 Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
 {
@@ -65,7 +145,7 @@ void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
 for (int i = 0; i < blobs.size(); ++i)
 mats[i] = infEngineBlobToMat(blobs[i]);
 }
+#endif // OpenVINO >= 2022.1

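For illustration, a small self-contained sketch of what the new infEngineBlobToMat() overload does: the returned Mat is only a header over the tensor's memory, so no data is copied and writes through either view are shared:

#include <openvino/openvino.hpp>
#include <opencv2/core.hpp>
#include <iostream>
#include <vector>

int main()
{
    ov::Tensor blob(ov::element::f32, {1, 3, 2, 2});  // owns a 12-float buffer

    std::vector<size_t> dims = blob.get_shape();
    std::vector<int> size(dims.begin(), dims.end());
    cv::Mat m(size, CV_32F, blob.data());             // borrows the buffer, no copy

    m.setTo(cv::Scalar::all(1.0f));                   // visible through the tensor too
    std::cout << blob.data<float>()[0] << '\n';       // prints 1
    return 0;
}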
 static bool init_IE_plugins()
 {
@@ -130,7 +210,11 @@ static bool detectArmPlugin_()
 {
 if (i->find("CPU") != std::string::npos)
 {
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+const std::string name = ie.get_property(*i, ov::device::full_name);
+#else
 const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+#endif
 CV_LOG_INFO(NULL, "CPU plugin: " << name);
 return name.find("arm_compute::NEON") != std::string::npos;
 }
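The same pattern appears in detectMyriadX_() below. A standalone sketch of the property query that replaces GetMetric(METRIC_KEY(FULL_DEVICE_NAME)) under OpenVINO >= 2022.1:

#include <openvino/openvino.hpp>
#include <iostream>

int main()
{
    ov::Core core;
    for (const std::string& device : core.get_available_devices())
    {
        // ov::device::full_name is the typed replacement for the old
        // FULL_DEVICE_NAME metric string.
        const std::string name = core.get_property(device, ov::device::full_name);
        std::cout << device << " -> " << name << '\n';
    }
    return 0;
}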
@@ -150,7 +234,11 @@ static bool detectMyriadX_(const std::string& device)
 {
 if (i->find(device) != std::string::npos)
 {
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+const std::string name = ie.get_property(*i, ov::device::full_name);
+#else
 const std::string name = ie.GetMetric(*i, METRIC_KEY(FULL_DEVICE_NAME)).as<std::string>();
+#endif
 CV_LOG_INFO(NULL, "Myriad device: " << name);
 return name.find("MyriadX") != std::string::npos || name.find("Myriad X") != std::string::npos || name.find("HDDL") != std::string::npos;
 }
@@ -19,11 +19,6 @@

 #ifdef HAVE_INF_ENGINE

-#define INF_ENGINE_RELEASE_2018R5 2018050000
-#define INF_ENGINE_RELEASE_2019R1 2019010000
-#define INF_ENGINE_RELEASE_2019R2 2019020000
-#define INF_ENGINE_RELEASE_2019R3 2019030000
-#define INF_ENGINE_RELEASE_2020_1 2020010000
 #define INF_ENGINE_RELEASE_2020_2 2020020000
 #define INF_ENGINE_RELEASE_2020_3 2020030000
 #define INF_ENGINE_RELEASE_2020_4 2020040000
@@ -31,6 +26,7 @@
 #define INF_ENGINE_RELEASE_2021_2 2021020000
 #define INF_ENGINE_RELEASE_2021_3 2021030000
 #define INF_ENGINE_RELEASE_2021_4 2021040000
+#define INF_ENGINE_RELEASE_2022_1 2022010000

 #ifndef INF_ENGINE_RELEASE
 #warning("IE version have not been provided via command-line. Using 2021.4 by default")
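For illustration only (the real INF_ENGINE_VER_MAJOR_* macros are defined elsewhere in this header; the MY_ names below are hypothetical stand-ins): the release constants encode year, update and patch as YYYYUUPPPP, so dividing by 10000 keeps only the year/update part that the major-version comparisons use:

#define MY_INF_ENGINE_RELEASE 2022010000   // pretend we build against 2022.1
#define MY_VER_MAJOR_GE(ver) (((MY_INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))

#if MY_VER_MAJOR_GE(2022010000)   // 202201 >= 202201: true for any 2022.1.x
// ... OpenVINO API 2.0 code path ...
#endif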
@@ -48,7 +44,13 @@
 #pragma GCC diagnostic ignored "-Wsuggest-override"
 #endif

+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+#include <openvino/openvino.hpp>
+#include <openvino/pass/serialize.hpp>
+#include <openvino/pass/convert_fp32_to_fp16.hpp>
+#else
 #include <inference_engine.hpp>
+#endif

 #if defined(__GNUC__) && __GNUC__ >= 5
 //#pragma GCC diagnostic pop
@@ -73,11 +75,17 @@ CV__DNN_INLINE_NS_END

 Backend& getInferenceEngineBackendTypeParam();

+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+Mat infEngineBlobToMat(const ov::Tensor& blob);
+
+void infEngineBlobsToMats(const ov::TensorVector& blobs,
+std::vector<Mat>& mats);
+#else
 Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

 void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
 std::vector<Mat>& mats);
+#endif // OpenVINO >= 2022.1

 CV__DNN_INLINE_NS_BEGIN
@@ -90,6 +98,52 @@ bool isArmComputePlugin();

 CV__DNN_INLINE_NS_END

+// A series of wrappers for classes from OpenVINO API 2.0.
+// Needed just to reduce the amount of conditional compilation.
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+namespace InferenceEngine {
+
+class CNNNetwork {
+public:
+CNNNetwork();
+
+CNNNetwork(std::shared_ptr<ov::Model> model);
+
+std::shared_ptr<ov::Model> getFunction() const;
+
+void serialize(const std::string& xmlPath, const std::string& binPath);
+
+void reshape(const std::map<std::string, std::vector<size_t> >& shapes);
+
+private:
+std::shared_ptr<ov::Model> model = nullptr;
+};
+
+typedef ov::InferRequest InferRequest;
+
+class ExecutableNetwork : public ov::CompiledModel {
+public:
+ExecutableNetwork();
+
+ExecutableNetwork(const ov::CompiledModel& copy);
+
+ov::InferRequest CreateInferRequest();
+};
+
+class Core : public ov::Core {
+public:
+std::vector<std::string> GetAvailableDevices();
+
+void UnregisterPlugin(const std::string& id);
+
+CNNNetwork ReadNetwork(const std::string& xmlPath, const std::string& binPath);
+
+ExecutableNetwork LoadNetwork(CNNNetwork net, const std::string& device,
+const std::map<std::string, std::string>& config);
+};
+
+}
+#endif // OpenVINO >= 2022.1

 InferenceEngine::Core& getCore(const std::string& id);
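A sketch of how these shims keep pre-2022.1 call sites compiling unchanged (usable only inside the dnn sources where this header is visible; the model files and device below are hypothetical). Each old-style call forwards to its API 2.0 counterpart:

static void exampleShimUsage()
{
    InferenceEngine::Core core;                           // an ov::Core underneath
    InferenceEngine::CNNNetwork net =
            core.ReadNetwork("model.xml", "model.bin");   // -> ov::Core::read_model
    InferenceEngine::ExecutableNetwork exec =
            core.LoadNetwork(net, "CPU", {});             // -> ov::Core::compile_model
    InferenceEngine::InferRequest req = exec.CreateInferRequest();  // -> create_infer_request
    (void)req;
}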
@@ -531,7 +531,7 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
 if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
 {
 l1 = 0.4;
-lInf = 7.45;
+lInf = 7.46;
 }
 else if (target == DNN_TARGET_CUDA_FP16)
 {
@@ -725,18 +725,21 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif

+double scoreDiff = 0.0;
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
 // Check 'backward_compatible_check || in_out_elements_equal' failed at core/src/op/reshape.cpp:427:
 // While validating node 'v1::Reshape bbox_pred_reshape (bbox_pred[0]:f32{1,84}, Constant_265242[0]:i64{4}) -> (f32{?,?,?,?})' with friendly_name 'bbox_pred_reshape':
 // Requested output shape {1,6300,4,1} is incompatible with input shape {1, 84}
 if (target == DNN_TARGET_MYRIAD)
 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+if (target == DNN_TARGET_OPENCL_FP16)
+scoreDiff = 0.02;
 #endif

 static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.949398, 99.2454, 210.141, 601.205, 462.849,
 0, 7, 0.997022, 481.841, 92.3218, 722.685, 175.953,
 0, 12, 0.993028, 133.221, 189.377, 350.994, 563.166);
-testFaster("faster_rcnn_vgg16.prototxt", "VGG16_faster_rcnn_final.caffemodel", ref);
+testFaster("faster_rcnn_vgg16.prototxt", "VGG16_faster_rcnn_final.caffemodel", ref, scoreDiff);
 }

 TEST_P(Test_Caffe_nets, FasterRCNN_zf)
@@ -638,6 +638,11 @@ TEST_P(Test_Darknet_nets, YOLOv3)
 double scoreDiff = 8e-5, iouDiff = 3e-4;
 if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2022010000)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+scoreDiff = 0.009;
+else
+#endif
 scoreDiff = 0.006;
 iouDiff = 0.042;
 }
@@ -771,6 +776,7 @@ TEST_P(Test_Darknet_nets, YOLOv4)
 // accuracy (batch 2)
 if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
 {
+scoreDiff = 0.008f;
 iouDiff = 0.05f;
 }
 // accuracy
@@ -438,6 +438,9 @@ TEST_P(DNNTestOpenVINO, models)
 {
 auto dstIt = cvOutputsMap.find(srcIt.first);
 CV_Assert(dstIt != cvOutputsMap.end());
+
+dstIt->second.convertTo(dstIt->second, srcIt.second.type());
+
 double normInf = cvtest::norm(srcIt.second, dstIt->second, cv::NORM_INF);
 EXPECT_LE(normInf, eps) << "output=" << srcIt.first;
 }
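A tiny sketch of why the added convertTo matters: cv::norm() requires both Mats to have the same type, and the backend outputs may come back in a different depth than the reference:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat ref(1, 4, CV_32F, cv::Scalar(1.0f));
    cv::Mat out(1, 4, CV_64F, cv::Scalar(1.0));  // same values, other depth

    out.convertTo(out, ref.type());              // align types in place
    std::cout << cv::norm(ref, out, cv::NORM_INF) << '\n';  // now valid: 0
    return 0;
}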
@@ -1292,7 +1292,7 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
 ASSERT_EQ(net.getLayer(outLayers[0])->type, "Convolution");
 else
-ASSERT_EQ(net.getLayer(outLayers[0])->type, "Add");
+ASSERT_EQ(net.getLayer(outLayers[0])->type, "Result");
 }

 TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
@@ -447,6 +447,10 @@ TEST_P(Test_Model, DetectionOutput)
 {
 if (backend == DNN_BACKEND_OPENCV)
 scoreDiff = 4e-3;
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2022010000)
+else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+scoreDiff = 4e-2;
+#endif
 else
 scoreDiff = 2e-2;
 iouDiff = 1.8e-1;