Merge branch '4.x' of github.com:opencv/opencv into at/sync-ie-request-pool
commit 9fd877acc9
@@ -92,6 +92,18 @@ struct ParamDesc {
     cv::optional<cv::gapi::wip::onevpl::Context> vpl_preproc_ctx;
 
     InferMode mode;
+
+    using PrecisionT = int;
+    using PrecisionMapT = std::unordered_map<std::string, PrecisionT>;
+    // NB: This parameter can contain:
+    // 1. cv::util::monostate - Don't specify precision, but use the default from IR/Blob.
+    // 2. PrecisionT (CV_8U, CV_32F, ...) - Specifies precision for all output layers.
+    // 3. PrecisionMapT ({{"layer0", CV_32F}, {"layer1", CV_16F}}) - Specifies precision for certain output layers.
+    // cv::util::monostate is the default value, meaning precision wasn't specified.
+    using PrecisionVariantT = cv::util::variant<cv::util::monostate,
+                                                PrecisionT,
+                                                PrecisionMapT>;
+    PrecisionVariantT output_precision;
 };
 } // namespace detail
 
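
To make the three alternatives concrete, here is a minimal sketch of how this variant is populated (the aliases come from the hunk above; CV_8U/CV_32F are the usual OpenCV depth constants, and cv::util::holds_alternative is the same helper the backend uses later in this commit; the "prob" layer name is a placeholder):

#include <opencv2/gapi/infer/ie.hpp>

void precision_variant_sketch() {
    using PD = cv::gapi::ie::detail::ParamDesc;
    PD::PrecisionVariantT p;                       // monostate: keep the IR/Blob default
    p = PD::PrecisionT{CV_32F};                    // one precision for every output layer
    p = PD::PrecisionMapT{{"prob", CV_8U}};        // precision for selected output layers only
    bool per_layer = cv::util::holds_alternative<PD::PrecisionMapT>(p);  // true
    (void)per_layer;
}
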
@@ -137,7 +149,8 @@ public:
               , {}
               , {}
               , {}
-              , InferMode::Async} {
+              , InferMode::Async
+              , {} } {
     };
 
     /** @overload
@@ -162,7 +175,8 @@ public:
               , {}
               , {}
               , {}
-              , InferMode::Async} {
+              , InferMode::Async
+              , {} } {
     };
 
     /** @brief Specifies sequence of network input layers names for inference.
@@ -373,6 +387,31 @@ public:
         return *this;
     }
+
+    /** @brief Specifies the output precision for the model.
+
+    The function is used to set an output precision for the model.
+
+    @param precision Precision in OpenCV format (CV_8U, CV_32F, ...)
+    that will be applied to all output layers.
+    @return reference to this parameter structure.
+    */
+    Params<Net>& cfgOutputPrecision(detail::ParamDesc::PrecisionT precision) {
+        desc.output_precision = precision;
+        return *this;
+    }
+
+    /** @overload
+
+    @param precision_map Map of pairs: name of the corresponding output layer
+    and its precision in OpenCV format (CV_8U, CV_32F, ...).
+    @return reference to this parameter structure.
+    */
+    Params<Net>&
+    cfgOutputPrecision(detail::ParamDesc::PrecisionMapT precision_map) {
+        desc.output_precision = precision_map;
+        return *this;
+    }
 
     // BEGIN(G-API's network parametrization API)
     GBackend backend() const { return cv::gapi::ie::backend(); }
     std::string tag()  const { return Net::tag(); }
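
A typical call-site for the new setter, mirroring the tests added at the end of this commit (MyNet, the file paths and the "prob" layer name are placeholders, not part of the patch):

#include <opencv2/gapi.hpp>
#include <opencv2/gapi/infer/ie.hpp>

G_API_NET(MyNet, <cv::GMat(cv::GMat)>, "sample-net");        // hypothetical network

void output_precision_usage_sketch() {
    auto pp = cv::gapi::ie::Params<MyNet>{"model.xml", "model.bin", "CPU"}
                  .cfgOutputPrecision(CV_8U);                // every output layer -> U8
    // ...or per layer: .cfgOutputPrecision({{"prob", CV_8U}});
    auto args = cv::compile_args(cv::gapi::networks(pp));
    (void)args;
}
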
@@ -407,8 +446,7 @@ public:
            const std::string &device)
         : desc{ model, weights, device, {}, {}, {}, 0u, 0u,
                 detail::ParamDesc::Kind::Load, true, {}, {}, {}, 1u,
-                {}, {}, {}, {},
-                InferMode::Async },
+                {}, {}, {}, {}, InferMode::Async, {} },
           m_tag(tag) {
     };
 
@@ -426,8 +464,7 @@ public:
            const std::string &device)
         : desc{ model, {}, device, {}, {}, {}, 0u, 0u,
                 detail::ParamDesc::Kind::Import, true, {}, {}, {}, 1u,
-                {}, {}, {}, {},
-                InferMode::Async },
+                {}, {}, {}, {}, InferMode::Async, {} },
           m_tag(tag) {
     };
 
@@ -506,6 +543,19 @@ public:
         return *this;
     }
+
+    /** @see ie::Params::cfgOutputPrecision */
+    Params& cfgOutputPrecision(detail::ParamDesc::PrecisionT precision) {
+        desc.output_precision = precision;
+        return *this;
+    }
+
+    /** @overload */
+    Params&
+    cfgOutputPrecision(detail::ParamDesc::PrecisionMapT precision_map) {
+        desc.output_precision = precision_map;
+        return *this;
+    }
 
     // BEGIN(G-API's network parametrization API)
     GBackend backend() const { return cv::gapi::ie::backend(); }
     std::string tag()  const { return m_tag; }
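
The non-templated Params (used with generic infer via a string tag) gains the same pair of setters; a matching call-site sketch, with a placeholder tag, paths and layer name:

#include <opencv2/gapi/infer/ie.hpp>

void generic_params_sketch() {
    cv::gapi::ie::Params<cv::gapi::Generic> pp{
        "age-gender",                           // tag used with cv::gapi::infer<cv::gapi::Generic>
        "model.xml", "model.bin", "CPU"
    };
    pp.cfgOutputPrecision({{"prob", CV_8U}});   // only "prob" is converted to U8
}
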
@@ -221,6 +221,12 @@ InferParams read<InferParams>(const cv::FileNode& fn) {
     params.input_layers  = readList<std::string>(fn, "input_layers", name);
     params.output_layers = readList<std::string>(fn, "output_layers", name);
     params.config        = readMap<std::string>(fn["config"]);
+
+    auto out_prec_str = readOpt<std::string>(fn["output_precision"]);
+    if (out_prec_str.has_value()) {
+        params.out_precision =
+            cv::optional<int>(strToPrecision(out_prec_str.value()));
+    }
     return params;
 }
 
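
strToPrecision itself is not part of this hunk; a plausible shape for it, inferred only from how it is called above (an assumption, not the tool's actual helper):

#include <opencv2/core.hpp>
#include <stdexcept>
#include <string>
#include <unordered_map>

// Hypothetical helper: maps the "output_precision" config string onto an OpenCV depth.
static int strToPrecision(const std::string& str) {
    static const std::unordered_map<std::string, int> str_to_cv{
        {"U8", CV_8U}, {"FP16", CV_16F}, {"FP32", CV_32F}
    };
    const auto it = str_to_cv.find(str);
    if (it == str_to_cv.end()) {
        throw std::logic_error("Unsupported output precision: " + str);
    }
    return it->second;
}
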
@@ -18,6 +18,7 @@ public:
                 const bool drop_frames);
     bool pull(cv::gapi::wip::Data& data) override;
     cv::GMetaArg descr_of() const override;
+    double latency() const { return m_latency; };
 
 private:
     double m_latency;
 
@@ -1,13 +1,18 @@
 #ifndef OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
 #define OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
 
+#include <iomanip>
+
 struct PerfReport {
-    std::string name;
-    double avg_latency = 0.0;
-    double throughput = 0.0;
-    int64_t first_run_latency = 0;
-    int64_t elapsed = 0;
-    int64_t compilation_time = 0;
+    std::string name;
+    double  avg_latency     = 0.0;
+    int64_t min_latency     = 0;
+    int64_t max_latency     = 0;
+    int64_t first_latency   = 0;
+    double  throughput      = 0.0;
+    int64_t elapsed         = 0;
+    int64_t warmup_time     = 0;
+    int64_t num_late_frames = 0;
     std::vector<int64_t> latencies;
 
     std::string toStr(bool expanded = false) const;
@@ -15,17 +20,19 @@ struct PerfReport {
 
 std::string PerfReport::toStr(bool expand) const {
     std::stringstream ss;
-    ss << name << ": Compilation time: " << compilation_time << " ms; "
-       << "Average latency: " << avg_latency << " ms; Throughput: "
-       << throughput << " FPS; First latency: "
-       << first_run_latency << " ms";
+    ss << name << ": \n"
+       << "  Warm up time: " << warmup_time << " ms\n"
+       << "  Execution time: " << elapsed << " ms\n"
+       << "  Frames: " << num_late_frames << "/" << latencies.size() << " (late/all)\n"
+       << "  Latency:\n"
+       << "    first: " << first_latency << " ms\n"
+       << "    min: " << min_latency << " ms\n"
+       << "    max: " << max_latency << " ms\n"
+       << "    avg: " << std::fixed << std::setprecision(3) << avg_latency << " ms\n"
+       << "  Throughput: " << std::fixed << std::setprecision(3) << throughput << " FPS";
     if (expand) {
         ss << "\nTotal processed frames: " << latencies.size()
            << "\nTotal elapsed time: " << elapsed << " ms" << std::endl;
         for (size_t i = 0; i < latencies.size(); ++i) {
-            ss << std::endl;
-            ss << "Frame:" << i << "\nLatency: "
+            ss << "\nFrame:" << i << "\nLatency: "
                << latencies[i] << " ms";
         }
     }
@@ -37,11 +44,11 @@ class Pipeline {
 public:
     using Ptr = std::shared_ptr<Pipeline>;
 
-    Pipeline(std::string&& name,
-             cv::GComputation&& comp,
-             cv::gapi::wip::IStreamSource::Ptr&& src,
-             cv::GCompileArgs&& args,
-             const size_t num_outputs);
+    Pipeline(std::string&& name,
+             cv::GComputation&& comp,
+             std::shared_ptr<DummySource>&& src,
+             cv::GCompileArgs&& args,
+             const size_t num_outputs);
 
     void compile();
     void run(double work_time_ms);
@@ -59,19 +66,19 @@ protected:
     virtual void _compile() = 0;
     virtual RunPerf _run(double work_time_ms) = 0;
 
-    std::string                       m_name;
-    cv::GComputation                  m_comp;
-    cv::gapi::wip::IStreamSource::Ptr m_src;
-    cv::GCompileArgs                  m_args;
-    size_t                            m_num_outputs;
-    PerfReport                        m_perf;
+    std::string                  m_name;
+    cv::GComputation             m_comp;
+    std::shared_ptr<DummySource> m_src;
+    cv::GCompileArgs             m_args;
+    size_t                       m_num_outputs;
+    PerfReport                   m_perf;
 };
 
-Pipeline::Pipeline(std::string&& name,
-                   cv::GComputation&& comp,
-                   cv::gapi::wip::IStreamSource::Ptr&& src,
-                   cv::GCompileArgs&& args,
-                   const size_t num_outputs)
+Pipeline::Pipeline(std::string&& name,
+                   cv::GComputation&& comp,
+                   std::shared_ptr<DummySource>&& src,
+                   cv::GCompileArgs&& args,
+                   const size_t num_outputs)
     : m_name(std::move(name)),
       m_comp(std::move(comp)),
       m_src(std::move(src)),
@@ -81,7 +88,7 @@ Pipeline::Pipeline(std::string&& name,
 }
 
 void Pipeline::compile() {
-    m_perf.compilation_time =
+    m_perf.warmup_time =
         utils::measure<std::chrono::milliseconds>([this]() {
             _compile();
         });
@@ -90,17 +97,23 @@ void Pipeline::compile() {
 
 void Pipeline::run(double work_time_ms) {
     auto run_perf = _run(work_time_ms);
 
-    m_perf.elapsed   = run_perf.elapsed;
-    m_perf.latencies = std::move(run_perf.latencies);
+    m_perf.elapsed       = run_perf.elapsed;
+    m_perf.latencies     = std::move(run_perf.latencies);
+    m_perf.avg_latency   = utils::avg(m_perf.latencies);
+    m_perf.min_latency   = utils::min(m_perf.latencies);
+    m_perf.max_latency   = utils::max(m_perf.latencies);
+    m_perf.first_latency = m_perf.latencies[0];
+
+    // NB: Count how many executions don't fit into the camera latency interval.
+    m_perf.num_late_frames =
+        std::count_if(m_perf.latencies.begin(), m_perf.latencies.end(),
+                      [this](int64_t latency) {
+                          return static_cast<double>(latency) > m_src->latency();
+                      });
 
-    m_perf.avg_latency =
-        std::accumulate(m_perf.latencies.begin(),
-                        m_perf.latencies.end(),
-                        0.0) / static_cast<double>(m_perf.latencies.size());
     m_perf.throughput =
         (m_perf.latencies.size() / static_cast<double>(m_perf.elapsed)) * 1000;
 
-    m_perf.first_run_latency = m_perf.latencies[0];
 }
 
 const PerfReport& Pipeline::report() const {
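
The same bookkeeping can be reproduced in isolation; a short sketch that fills a PerfReport by hand using the utils helpers added further down (all numbers and the 30 ms source interval are placeholders, and the tool's pipeline.hpp/utils.hpp are assumed to be included):

#include <algorithm>
#include <iostream>

void perf_report_sketch() {
    PerfReport r;
    r.name        = "pipeline#0";          // placeholder name
    r.latencies   = {28, 31, 45};          // per-frame latencies, ms
    r.elapsed     = 104;                   // total run time, ms
    r.warmup_time = 812;                   // compilation/warm-up time, ms

    r.avg_latency   = utils::avg(r.latencies);
    r.min_latency   = utils::min(r.latencies);
    r.max_latency   = utils::max(r.latencies);
    r.first_latency = r.latencies[0];
    r.throughput    = (r.latencies.size() / static_cast<double>(r.elapsed)) * 1000;

    // A frame is "late" when its latency exceeds the source's frame interval.
    const double src_latency_ms = 30.0;    // assumed camera interval
    r.num_late_frames =
        std::count_if(r.latencies.begin(), r.latencies.end(),
                      [&](int64_t l) { return static_cast<double>(l) > src_latency_ms; });

    std::cout << r.toStr() << std::endl;
}
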
@@ -259,6 +259,7 @@ struct InferParams {
     std::vector<std::string>           output_layers;
     std::map<std::string, std::string> config;
     cv::gapi::ie::InferMode            mode;
+    cv::util::optional<int>            out_precision;
 };
 
 class PipelineBuilder {
@@ -296,15 +297,15 @@ private:
         std::vector<Edge> output_edges;
     };
 
-    M<std::string, Node::Ptr> calls_map;
-    std::vector<Node::Ptr> all_calls;
+    M<std::string, Node::Ptr> calls_map;
+    std::vector<Node::Ptr>    all_calls;
 
-    cv::gapi::GNetPackage networks;
-    cv::gapi::GKernelPackage kernels;
-    cv::GCompileArgs compile_args;
-    cv::gapi::wip::IStreamSource::Ptr src;
-    PLMode mode = PLMode::STREAMING;
-    std::string name;
+    cv::gapi::GNetPackage        networks;
+    cv::gapi::GKernelPackage     kernels;
+    cv::GCompileArgs             compile_args;
+    std::shared_ptr<DummySource> src;
+    PLMode                       mode = PLMode::STREAMING;
+    std::string                  name;
 };
 
     std::unique_ptr<State> m_state;
@@ -364,6 +365,9 @@ void PipelineBuilder::addInfer(const CallParams& call_params,
 
     pp->pluginConfig(infer_params.config);
    pp->cfgInferMode(infer_params.mode);
+    if (infer_params.out_precision) {
+        pp->cfgOutputPrecision(infer_params.out_precision.value());
+    }
     m_state->networks += cv::gapi::networks(*pp);
 
     addCall(call_params,
@@ -104,6 +104,21 @@ void mergeMapWith(std::map<K, V>& target, const std::map<K, V>& second) {
     }
 }
 
+template <typename T>
+double avg(const std::vector<T>& vec) {
+    return std::accumulate(vec.begin(), vec.end(), 0.0) / vec.size();
+}
+
+template <typename T>
+T max(const std::vector<T>& vec) {
+    return *std::max_element(vec.begin(), vec.end());
+}
+
+template <typename T>
+T min(const std::vector<T>& vec) {
+    return *std::min_element(vec.begin(), vec.end());
+}
+
 } // namespace utils
 
 #endif // OPENCV_GAPI_PIPELINE_MODELING_TOOL_UTILS_HPP
 
@@ -197,6 +197,16 @@ inline IE::Blob::Ptr wrapIE(const cv::MediaFrame::View& view,
 
 template<class MatType>
 inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
+    const auto& desc = blob->getTensorDesc();
+    const auto ie_type = toCV(desc.getPrecision());
+    if (ie_type != mat.type()) {
+        std::stringstream ss;
+        ss << "Failed to copy blob from IE to OCV: "
+           << "Blobs have different data types "
+           << "(IE type: " << ie_type
+           << " vs OCV type: " << mat.type() << ")." << std::endl;
+        throw std::logic_error(ss.str());
+    }
     switch (blob->getTensorDesc().getPrecision()) {
 #define HANDLE(E,T) \
         case IE::Precision::E: std::copy_n(blob->buffer().as<T*>(), \
@@ -365,6 +375,13 @@ struct IEUnit {
             cv::util::throw_error(std::logic_error("Unsupported ParamDesc::Kind"));
         }
 
+        if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import &&
+            !cv::util::holds_alternative<cv::util::monostate>(params.output_precision)) {
+            cv::util::throw_error(
+                std::logic_error("Setting output precision isn't supported for imported network"));
+        }
+
         using namespace cv::gapi::wip::onevpl;
         if (params.vpl_preproc_device.has_value() && params.vpl_preproc_ctx.has_value()) {
             using namespace cv::gapi::wip;
@@ -1188,6 +1205,28 @@ static IE::PreProcessInfo configurePreProcInfo(const IE::InputInfo::CPtr& ii,
     return info;
 }
 
+using namespace cv::gapi::ie::detail;
+static void configureOutputPrecision(const IE::OutputsDataMap           &outputs_info,
+                                     const ParamDesc::PrecisionVariantT &output_precision) {
+    cv::util::visit(cv::util::overload_lambdas(
+            [&outputs_info](ParamDesc::PrecisionT cvdepth) {
+                auto precision = toIE(cvdepth);
+                for (auto it : outputs_info) {
+                    it.second->setPrecision(precision);
+                }
+            },
+            [&outputs_info](const ParamDesc::PrecisionMapT& precision_map) {
+                for (auto it : precision_map) {
+                    outputs_info.at(it.first)->setPrecision(toIE(it.second));
+                }
+            },
+            [&outputs_info](cv::util::monostate) {
+                // Do nothing.
+            }
+        ), output_precision
+    );
+}
+
 // NB: This is a callback used by async infer
 // to post outputs blobs (cv::GMat's).
 static void PostOutputs(InferenceEngine::InferRequest &request,
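
cv::util::overload_lambdas simply stitches several lambdas into one overloaded callable for cv::util::visit; the same pattern on a toy variant, as a self-contained sketch unrelated to the backend code above:

#include <opencv2/gapi/util/variant.hpp>
#include <iostream>
#include <string>

int main() {
    cv::util::variant<cv::util::monostate, int, std::string> v = std::string("prob");
    cv::util::visit(cv::util::overload_lambdas(
        [](int depth)            { std::cout << "depth: " << depth << "\n"; },
        [](const std::string& s) { std::cout << "layer: " << s     << "\n"; },
        [](cv::util::monostate)  { /* nothing was specified */ }
    ), v);
    return 0;
}

With the string alternative active this prints "layer: prob"; the backend's visitor applies the same dispatch to the three precision alternatives.
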
@@ -1307,7 +1346,7 @@ struct Infer: public cv::detail::KernelTag {
         GAPI_Assert(uu.params.input_names.size() == in_metas.size()
                     && "Known input layers count doesn't match input meta count");
 
-        // NB: Configuring input precision and network reshape must be done
+        // NB: Configuring input/output precision and network reshape must be done
         // only in the loadNetwork case.
         using namespace cv::gapi::ie::detail;
         if (uu.params.kind == ParamDesc::Kind::Load) {
@@ -1341,6 +1380,7 @@ struct Infer: public cv::detail::KernelTag {
             if (!input_reshape_table.empty()) {
                 const_cast<IE::CNNNetwork *>(&uu.net)->reshape(input_reshape_table);
             }
+            configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
         } else {
             GAPI_Assert(uu.params.kind == ParamDesc::Kind::Import);
             auto inputs = uu.this_network.GetInputsInfo();
@@ -1456,6 +1496,7 @@ struct InferROI: public cv::detail::KernelTag {
                 const_cast<IEUnit::InputFramesDesc &>(uu.net_input_params)
                     .set_param(input_name, ii->getTensorDesc());
             }
+            configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
         } else {
             GAPI_Assert(uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import);
             auto inputs = uu.this_network.GetInputsInfo();
@@ -1573,6 +1614,7 @@ struct InferList: public cv::detail::KernelTag {
             if (!input_reshape_table.empty()) {
                 const_cast<IE::CNNNetwork *>(&uu.net)->reshape(input_reshape_table);
             }
+            configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
         } else {
             GAPI_Assert(uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import);
             std::size_t idx = 1u;
@@ -1726,6 +1768,7 @@ struct InferList2: public cv::detail::KernelTag {
             if (!input_reshape_table.empty()) {
                 const_cast<IE::CNNNetwork *>(&uu.net)->reshape(input_reshape_table);
             }
+            configureOutputPrecision(uu.net.getOutputsInfo(), uu.params.output_precision);
         } else {
             GAPI_Assert(uu.params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import);
             auto inputs = uu.this_network.GetInputsInfo();
 
@@ -3027,6 +3027,111 @@ TEST_F(AgeGenderInferTest, ThrowSyncWithNireqNotEqualToOne) {
                               cv::compile_args(cv::gapi::networks(pp))));
 }
 
+TEST(TestAgeGenderIE, ChangeOutputPrecision)
+{
+    initDLDTDataPath();
+
+    cv::gapi::ie::detail::ParamDesc params;
+    params.model_path   = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
+    params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
+    params.device_id    = "CPU";
+
+    cv::Mat in_mat(cv::Size(320, 240), CV_8UC3);
+    cv::randu(in_mat, 0, 255);
+
+    cv::Mat gapi_age, gapi_gender;
+
+    // Load & run IE network
+    IE::Blob::Ptr ie_age, ie_gender;
+    {
+        auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
+        auto net    = cv::gimpl::ie::wrap::readNetwork(params);
+        setNetParameters(net);
+        for (auto it : net.getOutputsInfo()) {
+            it.second->setPrecision(IE::Precision::U8);
+        }
+        auto this_network  = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
+        auto infer_request = this_network.CreateInferRequest();
+        infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
+        infer_request.Infer();
+        ie_age    = infer_request.GetBlob("age_conv3");
+        ie_gender = infer_request.GetBlob("prob");
+    }
+
+    // Configure & run G-API
+    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
+    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
+
+    cv::GMat in;
+    cv::GMat age, gender;
+    std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<AgeGender> {
+        params.model_path, params.weights_path, params.device_id
+    }.cfgOutputLayers({ "age_conv3", "prob" })
+     .cfgOutputPrecision(CV_8U);
+    comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender),
+               cv::compile_args(cv::gapi::networks(pp)));
+
+    // Validate with IE itself (avoid DNN module dependency here)
+    normAssert(cv::gapi::ie::util::to_ocv(ie_age),    gapi_age,    "Test age output"   );
+    normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
+}
+
+TEST(TestAgeGenderIE, ChangeSpecificOutputPrecison)
+{
+    initDLDTDataPath();
+
+    cv::gapi::ie::detail::ParamDesc params;
+    params.model_path   = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
+    params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
+    params.device_id    = "CPU";
+
+    cv::Mat in_mat(cv::Size(320, 240), CV_8UC3);
+    cv::randu(in_mat, 0, 255);
+
+    cv::Mat gapi_age, gapi_gender;
+
+    // Load & run IE network
+    IE::Blob::Ptr ie_age, ie_gender;
+    {
+        auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
+        auto net    = cv::gimpl::ie::wrap::readNetwork(params);
+        setNetParameters(net);
+
+        // NB: Specify precision only for "prob" output.
+        net.getOutputsInfo().at("prob")->setPrecision(IE::Precision::U8);
+
+        auto this_network  = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
+        auto infer_request = this_network.CreateInferRequest();
+        infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
+        infer_request.Infer();
+        ie_age    = infer_request.GetBlob("age_conv3");
+        ie_gender = infer_request.GetBlob("prob");
+    }
+
+    // Configure & run G-API
+    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
+    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
+
+    cv::GMat in;
+    cv::GMat age, gender;
+    std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
+    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
+
+    auto pp = cv::gapi::ie::Params<AgeGender> {
+        params.model_path, params.weights_path, params.device_id
+    }.cfgOutputLayers({ "age_conv3", "prob" })
+     .cfgOutputPrecision({{"prob", CV_8U}});
+    comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender),
+               cv::compile_args(cv::gapi::networks(pp)));
+
+    // Validate with IE itself (avoid DNN module dependency here)
+    normAssert(cv::gapi::ie::util::to_ocv(ie_age),    gapi_age,    "Test age output"   );
+    normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
+}
+
 } // namespace opencv_test
 
 #endif // HAVE_INF_ENGINE