Merge pull request #21566 from TolyaTalamanov:at/gapi-modeling-tool_fix_handling_fp16_precision
[G-API] Pipeline modeling tool - Fix generating FP16 Mat

* Fix handling of fp16 precision
* Disable preproc in case of an FP16 input inside the IE backend
* Fix isApplicableForResize function
This commit is contained in:
parent
57d3002ee1
commit
08356007c9
@@ -28,11 +28,8 @@ private:
 
 DummySource::DummySource(const double latency,
                          const OutputDescr& output)
-    : m_latency(latency), m_mat(output.dims, output.precision) {
-    if (output.dims.size() == 1) {
-        //FIXME: Well-known 1D mat WA
-        m_mat.dims = 1;
-    }
+    : m_latency(latency) {
+    utils::createNDMat(m_mat, output.dims, output.precision);
     utils::generateRandom(m_mat);
 }
 
@@ -73,7 +73,7 @@ struct DummyCall {
                       std::shared_ptr<DummyState>& state,
                       const cv::GCompileArgs& /*args*/) {
         state.reset(new DummyState{});
-        state->mat.create(output.dims, output.precision);
+        utils::createNDMat(state->mat, output.dims, output.precision);
         utils::generateRandom(state->mat);
     }
 
@@ -14,6 +14,16 @@ struct OutputDescr {
 };
 
 namespace utils {
 
+inline void createNDMat(cv::Mat& mat, const std::vector<int>& dims, int depth) {
+    GAPI_Assert(!dims.empty());
+    mat.create(dims, depth);
+    if (dims.size() == 1) {
+        //FIXME: Well-known 1D mat WA
+        mat.dims = 1;
+    }
+}
+
 inline void generateRandom(cv::Mat& out) {
     switch (out.depth()) {
     case CV_8U:
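For context, a minimal standalone sketch of how the new utils::createNDMat helper is intended to be used, including the 1-D case that needs the mat.dims workaround. The dims values below are illustrative, and GAPI_Assert is swapped for CV_Assert so the sketch builds outside the G-API tree:

    #include <opencv2/core.hpp>
    #include <vector>

    // Simplified copy of the helper added above (CV_Assert instead of GAPI_Assert).
    static void createNDMat(cv::Mat& mat, const std::vector<int>& dims, int depth) {
        CV_Assert(!dims.empty());
        mat.create(dims, depth);
        if (dims.size() == 1) {
            // Well-known 1D Mat workaround: cv::Mat has no native 1-D support and
            // create() promotes the request to 2-D, so force dims back to 1.
            mat.dims = 1;
        }
    }

    int main() {
        cv::Mat blob, vec;
        createNDMat(blob, {1, 3, 224, 224}, CV_32F); // 4-D NCHW-like blob
        createNDMat(vec,  {1000},           CV_16F); // 1-D FP16 vector
        return 0;
    }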
@@ -23,7 +33,12 @@ inline void generateRandom(cv::Mat& out) {
         cv::randu(out, 0.f, 1.f);
         break;
     case CV_16F: {
-        cv::Mat fp32_mat(out.size(), CV_MAKETYPE(CV_32F, out.channels()));
+        std::vector<int> dims;
+        for (int i = 0; i < out.size.dims(); ++i) {
+            dims.push_back(out.size[i]);
+        }
+        cv::Mat fp32_mat;
+        createNDMat(fp32_mat, dims, CV_32F);
         cv::randu(fp32_mat, 0.f, 1.f);
         fp32_mat.convertTo(out, out.type());
         break;
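The CV_16F branch above keeps the existing pattern for FP16 data: random values are generated into an FP32 matrix of the same shape (now built through createNDMat so N-dimensional and 1-D outputs work as well) and then converted down to FP16. A minimal standalone illustration of that convert-down pattern, with a 2-D shape chosen purely for brevity:

    #include <opencv2/core.hpp>

    int main() {
        cv::Mat fp16_mat(480, 640, CV_16FC1);   // FP16 destination

        // Generate the random data in FP32 first...
        cv::Mat fp32_mat(fp16_mat.size(), CV_MAKETYPE(CV_32F, fp16_mat.channels()));
        cv::randu(fp32_mat, 0.f, 1.f);

        // ...then convert it down into the FP16 matrix.
        fp32_mat.convertTo(fp16_mat, fp16_mat.type());
        return 0;
    }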
@@ -855,6 +855,13 @@ static void configureInputInfo(const IE::InputInfo::Ptr& ii, const cv::GMetaArg
     }
 }
 
+static bool isApplicableForResize(const IE::TensorDesc& desc) {
+    const auto layout = desc.getLayout();
+    const auto prec = desc.getPrecision();
+    return (layout == IE::Layout::NCHW || layout == IE::Layout::NHWC) &&
+           (prec == IE::Precision::FP32 || prec == IE::Precision::U8);
+}
+
 static IE::PreProcessInfo configurePreProcInfo(const IE::InputInfo::CPtr& ii,
                                                const cv::GMetaArg& mm) {
     IE::PreProcessInfo info;
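The new helper above is what implements the "disable preproc in case of FP16 input" part of this change: resize preprocessing is configured only for NCHW/NHWC layouts with FP32 or U8 precision, so FP16 inputs (and non-image layouts) simply skip the resize setup in the callers below. A small hypothetical check of that predicate, assuming the pre-2022 InferenceEngine API used by this backend (the umbrella header and tensor shapes are illustrative):

    #include <inference_engine.hpp>
    #include <iostream>

    namespace IE = InferenceEngine;

    // Same predicate as the one added above, copied here for a standalone check.
    static bool isApplicableForResize(const IE::TensorDesc& desc) {
        const auto layout = desc.getLayout();
        const auto prec = desc.getPrecision();
        return (layout == IE::Layout::NCHW || layout == IE::Layout::NHWC) &&
               (prec == IE::Precision::FP32 || prec == IE::Precision::U8);
    }

    int main() {
        IE::TensorDesc fp32_nchw(IE::Precision::FP32, {1, 3, 224, 224}, IE::Layout::NCHW);
        IE::TensorDesc fp16_nchw(IE::Precision::FP16, {1, 3, 224, 224}, IE::Layout::NCHW);

        std::cout << isApplicableForResize(fp32_nchw) << "\n"; // 1 -> resize is configured
        std::cout << isApplicableForResize(fp16_nchw) << "\n"; // 0 -> resize is skipped
        return 0;
    }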
@@ -864,9 +871,7 @@ static IE::PreProcessInfo configurePreProcInfo(const IE::InputInfo::CPtr& ii,
             info.setColorFormat(IE::ColorFormat::NV12);
         }
     }
-    const auto layout = ii->getTensorDesc().getLayout();
-    if (layout == IE::Layout::NCHW ||
-        layout == IE::Layout::NHWC) {
+    if (isApplicableForResize(ii->getTensorDesc())) {
         info.setResizeAlgorithm(IE::RESIZE_BILINEAR);
     }
     return info;
@@ -986,11 +991,7 @@ struct Infer: public cv::detail::KernelTag {
                     configureInputReshapeByImage(ii, mm, input_reshape_table);
                 }
 
-                // NB: Configure resize only for NCHW/NHWC layout,
-                // since it isn't supposed to work with others.
-                auto layout = ii->getTensorDesc().getLayout();
-                if (layout == IE::Layout::NCHW ||
-                    layout == IE::Layout::NHWC) {
+                if (isApplicableForResize(ii->getTensorDesc())) {
                     ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
                 }
             }
@@ -1095,7 +1096,9 @@ struct InferROI: public cv::detail::KernelTag {
                 uu.params.layer_names_to_reshape.end()) {
                 configureInputReshapeByImage(ii, mm, input_reshape_table);
             }
-            ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            if (isApplicableForResize(ii->getTensorDesc())) {
+                ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+            }
 
             // FIXME: This isn't the best place to call reshape function.
             // Сorrect solution would be to do this in compile() method of network,
@@ -1193,7 +1196,9 @@ struct InferList: public cv::detail::KernelTag {
                     uu.params.layer_names_to_reshape.end()) {
                     configureInputReshapeByImage(ii, mm, input_reshape_table);
                 }
-                ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+                if (isApplicableForResize(ii->getTensorDesc())) {
+                    ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+                }
             }
 
             // FIXME: This isn't the best place to call reshape function.
@@ -1346,7 +1351,9 @@ struct InferList2: public cv::detail::KernelTag {
                     uu.params.layer_names_to_reshape.end()) {
                     configureInputReshapeByImage(ii, mm_0, input_reshape_table);
                 }
-                ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+                if (isApplicableForResize(ii->getTensorDesc())) {
+                    ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
+                }
 
                 // FIXME: This isn't the best place to call reshape function.
                 // Сorrect solution would be to do this in compile() method of network,