diff --git a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
index 741fbe18f0..5dd70bd2e8 100644
--- a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
+++ b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
@@ -271,6 +271,11 @@ template<> struct get_out<cv::GArray<cv::GMat> >: public get_out<cv::GArray<cv::Mat> >
 {
 };
 
+//FIXME(dm): GArray<vector<U>>/GArray<GArray<U>> conversion should be done more gracefully in the system
+template<typename U> struct get_out<cv::GArray<cv::GArray<U>> >: public get_out<cv::GArray<std::vector<U>> >
+{
+};
+
 template<typename U> struct get_out<cv::GOpaque<U>>
 {
     static U& get(GCPUContext &ctx, int idx)
diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp
index cc091dfa8e..0e4254cb87 100644
--- a/modules/gapi/include/opencv2/gapi/imgproc.hpp
+++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp
@@ -21,14 +21,45 @@
     @{
         @defgroup gapi_filters Graph API: Image filters
         @defgroup gapi_colorconvert Graph API: Converting image from one color space to another
+        @defgroup gapi_feature Graph API: Image Feature Detection
+        @defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors
     @}
  */
 
+namespace {
+void validateFindingContoursMeta(const int depth, const int chan, const int mode)
+{
+    GAPI_Assert(chan == 1);
+    switch (mode)
+    {
+    case cv::RETR_CCOMP:
+        GAPI_Assert(depth == CV_8U || depth == CV_32S);
+        break;
+    case cv::RETR_FLOODFILL:
+        GAPI_Assert(depth == CV_32S);
+        break;
+    default:
+        GAPI_Assert(depth == CV_8U);
+        break;
+    }
+}
+
+// Checks if the passed mat is a set of n-dimensional points of the given depth
+bool isPointsVector(const int chan, const cv::Size &size, const int depth,
+                    const int n, const int ddepth)
+{
+    return (ddepth == depth || ddepth < 0) &&
+           ((chan == n && (size.height == 1 || size.width == 1)) ||
+            (chan == 1 && size.width == n));
+}
+} // anonymous namespace
+
 namespace cv { namespace gapi {
 
 namespace imgproc {
     using GMat2 = std::tuple<GMat,GMat>;
     using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
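The `isPointsVector` helper above decides which Mat layouts are accepted as a set of 2D points by the new boundingRect kernel's metadata check. A small standalone restatement of that predicate, with a hypothetical `main` harness added purely for illustration (not part of this patch):

```cpp
#include <cassert>
#include <opencv2/core.hpp>

// Restatement of the shape check used by the boundingRect kernel's outMeta.
static bool isPointsVector(int chan, const cv::Size &size, int depth, int n, int ddepth)
{
    return (ddepth == depth || ddepth < 0) &&
           ((chan == n && (size.height == 1 || size.width == 1)) ||
            (chan == 1 && size.width == n));
}

int main()
{
    cv::Mat a(1, 100, CV_32SC2);   // 1xN, 2-channel: vector<cv::Point>-like layout
    cv::Mat b(100, 2, CV_32FC1);   // Nx2, 1-channel: one point per row
    cv::Mat c(480, 640, CV_8UC1);  // plain image: not a point set

    assert( isPointsVector(a.channels(), a.size(), a.depth(), 2, CV_32S));
    assert( isPointsVector(b.channels(), b.size(), b.depth(), 2, CV_32F));
    assert(!isPointsVector(c.channels(), c.size(), c.depth(), 2, CV_32S));
    return 0;
}
```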
+ using GFindContoursOutput = std::tuple>,GArray>; G_TYPED_KERNEL(GFilter2D, ,"org.opencv.imgproc.filters.filter2D") { static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) { @@ -118,7 +149,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GCanny, , "org.opencv.imgproc.canny"){ + G_TYPED_KERNEL(GCanny, , "org.opencv.imgproc.feature.canny"){ static GMatDesc outMeta(GMatDesc in, double, double, int, bool) { return in.withType(CV_8U, 1); } @@ -126,12 +157,83 @@ namespace imgproc { G_TYPED_KERNEL(GGoodFeatures, (GMat,int,double,double,Mat,int,bool,double)>, - "org.opencv.imgproc.goodFeaturesToTrack") { + "org.opencv.imgproc.feature.goodFeaturesToTrack") { static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) { return empty_array_desc(); } }; + using RetrMode = RetrievalModes; + using ContMethod = ContourApproximationModes; + G_TYPED_KERNEL(GFindContours, >(GMat,RetrMode,ContMethod,GOpaque)>, + "org.opencv.imgproc.shape.findContours") + { + static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return empty_array_desc(); + } + }; + + // FIXME oc: make default value offset = Point() + G_TYPED_KERNEL(GFindContoursNoOffset, >(GMat,RetrMode,ContMethod)>, + "org.opencv.imgproc.shape.findContoursNoOffset") + { + static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return empty_array_desc(); + } + }; + + G_TYPED_KERNEL(GFindContoursH,)>, + "org.opencv.imgproc.shape.findContoursH") + { + static std::tuple + outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return std::make_tuple(empty_array_desc(), empty_array_desc()); + } + }; + + // FIXME oc: make default value offset = Point() + G_TYPED_KERNEL(GFindContoursHNoOffset,, + "org.opencv.imgproc.shape.findContoursHNoOffset") + { + static std::tuple + outMeta(GMatDesc in, RetrMode mode, ContMethod) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return std::make_tuple(empty_array_desc(), empty_array_desc()); + } + }; + + G_TYPED_KERNEL(GBoundingRectMat, (GMat)>, + "org.opencv.imgproc.shape.boundingRectMat") { + static GOpaqueDesc outMeta(GMatDesc in) { + GAPI_Assert((in.depth == CV_8U && in.chan == 1) || + (isPointsVector(in.chan, in.size, in.depth, 2, CV_32S) || + isPointsVector(in.chan, in.size, in.depth, 2, CV_32F))); + + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GBoundingRectVector32S, (GArray)>, + "org.opencv.imgproc.shape.boundingRectVector32S") { + static GOpaqueDesc outMeta(GArrayDesc) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GBoundingRectVector32F, (GArray)>, + "org.opencv.imgproc.shape.boundingRectVector32F") { + static GOpaqueDesc outMeta(GArrayDesc) { + return empty_gopaque_desc(); + } + }; + G_TYPED_KERNEL(GBGR2RGB, , "org.opencv.imgproc.colorconvert.bgr2rgb") { static GMatDesc outMeta(GMatDesc in) { return in; // type still remains CV_8UC3; @@ -280,7 +382,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toRGBp, , "org.opencv.colorconvert.imgproc.nv12torgbp") { + G_TYPED_KERNEL(GNV12toRGBp, , "org.opencv.imgproc.colorconvert.nv12torgbp") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -294,7 +396,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toGray, , "org.opencv.colorconvert.imgproc.nv12togray") { + 
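The kernels added above all follow the same G-API declaration pattern: `G_TYPED_KERNEL` fixes the typed signature and a textual ID, and a static `outMeta()` validates the input descriptors and describes the outputs at graph compile time. A minimal sketch of that pattern with a made-up kernel; the name, ID, and the single-channel check are purely illustrative and not part of this patch:

```cpp
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/gopaque.hpp>

// Hypothetical kernel: counts non-zero pixels of a single-channel GMat
// and returns the result as a GOpaque<int>.
G_TYPED_KERNEL(GCountNonZeroExample, <cv::GOpaque<int>(cv::GMat)>,
               "org.example.core.countNonZeroExample")
{
    static cv::GOpaqueDesc outMeta(cv::GMatDesc in)
    {
        GAPI_Assert(in.chan == 1);        // reject unsupported inputs early, like the checks above
        return cv::empty_gopaque_desc();
    }
};
```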
G_TYPED_KERNEL(GNV12toGray, , "org.opencv.imgproc.colorconvert.nv12togray") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -309,7 +411,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toBGRp, , "org.opencv.colorconvert.imgproc.nv12tobgrp") { + G_TYPED_KERNEL(GNV12toBGRp, , "org.opencv.imgproc.colorconvert.nv12tobgrp") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -800,6 +902,10 @@ proportional to sigmaSpace. GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT); +//! @} gapi_filters + +//! @addtogroup gapi_feature +//! @{ /** @brief Finds edges in an image using the Canny algorithm. The function finds edges in the input image and marks them in the output map edges using the @@ -807,7 +913,7 @@ Canny algorithm. The smallest value between threshold1 and threshold2 is used fo largest value is used to find initial segments of strong edges. See -@note Function textual ID is "org.opencv.imgproc.filters.canny" +@note Function textual ID is "org.opencv.imgproc.feature.canny" @param image 8-bit input image. @param threshold1 first threshold for the hysteresis procedure. @@ -842,7 +948,7 @@ The function can be used to initialize a point-based tracker of an object. A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector with qualityLevel=B . -@note Function textual ID is "org.opencv.imgproc.goodFeaturesToTrack" +@note Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack" @param image Input 8-bit or floating-point 32-bit, single-channel image. @param maxCorners Maximum number of corners to return. If there are more corners than are found, @@ -876,6 +982,8 @@ GAPI_EXPORTS GArray goodFeaturesToTrack(const GMat &image, /** @brief Equalizes the histogram of a grayscale image. +//! @} gapi_feature + The function equalizes the histogram of the input image using the following algorithm: - Calculate the histogram \f$H\f$ for src . @@ -893,6 +1001,120 @@ The algorithm normalizes the brightness and increases the contrast of the image. */ GAPI_EXPORTS GMat equalizeHist(const GMat& src); +//! @addtogroup gapi_shape +//! @{ +/** @brief Finds contours in a binary image. + +The function retrieves contours from the binary image using the algorithm @cite Suzuki85 . +The contours are a useful tool for shape analysis and object detection and recognition. +See squares.cpp in the OpenCV sample directory. + +@note Function textual ID is "org.opencv.imgproc.shape.findContours" + +@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero +pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold , +#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one. +If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer +image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL then @ref CV_32SC1 is supported only. +@param mode Contour retrieval mode, see #RetrievalModes +@param method Contour approximation method, see #ContourApproximationModes +@param offset Optional offset by which every contour point is shifted. This is useful if the +contours are extracted from the image ROI and then they should be analyzed in the whole image +context. + +@return GArray of detected contours. 
Each contour is stored as a GArray of points.
+ */
+GAPI_EXPORTS GArray<GArray<Point>>
+findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
+             const GOpaque<Point> &offset);
+
+// FIXME oc: make default value offset = Point()
+/** @overload
+@note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset"
+ */
+GAPI_EXPORTS GArray<GArray<Point>>
+findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
+
+/** @brief Finds contours and their hierarchy in a binary image.
+
+The function retrieves contours from the binary image using the algorithm @cite Suzuki85
+and calculates their hierarchy.
+The contours are a useful tool for shape analysis and object detection and recognition.
+See squares.cpp in the OpenCV sample directory.
+
+@note Function textual ID is "org.opencv.imgproc.shape.findContoursH"
+
+@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
+pixels remain 0's, so the image is treated as binary. You can use #compare, #inRange, #threshold,
+#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
+If mode equals #RETR_CCOMP, the input can also be a 32-bit integer
+image of labels ( @ref CV_32SC1 ). If mode equals #RETR_FLOODFILL, only @ref CV_32SC1 is supported.
+@param mode Contour retrieval mode, see #RetrievalModes
+@param method Contour approximation method, see #ContourApproximationModes
+@param offset Optional offset by which every contour point is shifted. This is useful if the
+contours are extracted from the image ROI and then they should be analyzed in the whole image
+context.
+
+@return GArray of detected contours. Each contour is stored as a GArray of points.
+@return Optional output GArray of cv::Vec4i, containing information about the image topology.
+It has as many elements as the number of contours. For each i-th contour contours[i], the elements
+hierarchy[i][0], hierarchy[i][1], hierarchy[i][2], and hierarchy[i][3] are set to 0-based
+indices in contours of the next and previous contours at the same hierarchical level, the first
+child contour and the parent contour, respectively. If for the contour i there are no next,
+previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
+ */
+GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
+findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
+              const GOpaque<Point> &offset);
+
+// FIXME oc: make default value offset = Point()
+/** @overload
+@note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset"
+ */
+GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
+findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
+
+/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels
+of gray-scale image.
+
+The function calculates and returns the minimal up-right bounding rectangle for the specified
+point set or non-zero pixels of gray-scale image.
+
+@note Function textual ID is "org.opencv.imgproc.shape.boundingRectMat"
+
+@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F
+2D points stored in Mat.
+
+@note In case of a 2D points' set given, Mat should be 2-dimensional, have a single row or column
+if there are 2 channels, or have 2 columns if there is a single channel.
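For reference, this is roughly how the functions documented above are meant to be used from application code; the function name, the assumption that `binary_image` is CV_8UC1, and the choice of retrieval/approximation modes are illustrative (modeled on the accuracy tests later in this patch):

```cpp
#include <tuple>
#include <vector>
#include <opencv2/imgproc.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/cpu/imgproc.hpp>   // cv::gapi::imgproc::cpu::kernels()

void detectShapes(const cv::Mat &binary_image)   // expected to be CV_8UC1
{
    cv::GMat in;
    cv::GArray<cv::GArray<cv::Point>> contours;
    cv::GArray<cv::Vec4i>             hierarchy;
    std::tie(contours, hierarchy) =
        cv::gapi::findContoursH(in, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
    cv::GOpaque<cv::Rect> rect = cv::gapi::boundingRect(in);

    cv::GComputation graph(cv::GIn(in), cv::GOut(contours, hierarchy, rect));

    std::vector<std::vector<cv::Point>> out_contours;
    std::vector<cv::Vec4i>              out_hierarchy;
    cv::Rect                            out_rect;
    graph.apply(cv::gin(binary_image),
                cv::gout(out_contours, out_hierarchy, out_rect),
                cv::compile_args(cv::gapi::imgproc::cpu::kernels()));
}
```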
Mat should have either +@ref CV_32S or @ref CV_32F depth + */ +GAPI_EXPORTS GOpaque boundingRect(const GMat& src); + +/** @overload + +Calculates the up-right bounding rectangle of a point set. + +@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S" + +@param src Input 2D point set, stored in std::vector. + */ +GAPI_EXPORTS GOpaque boundingRect(const GArray& src); + +/** @overload + +Calculates the up-right bounding rectangle of a point set. + +@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F" + +@param src Input 2D point set, stored in std::vector. + */ +GAPI_EXPORTS GOpaque boundingRect(const GArray& src); + +//! @} gapi_shape + +//! @addtogroup gapi_colorconvert +//! @{ /** @brief Converts an image from BGR color space to RGB color space. The function converts an input image from BGR color space to RGB. @@ -907,10 +1129,6 @@ Output image is 8-bit unsigned 3-channel image @ref CV_8UC3. */ GAPI_EXPORTS GMat BGR2RGB(const GMat& src); -//! @} gapi_filters - -//! @addtogroup gapi_colorconvert -//! @{ /** @brief Converts an image from RGB color space to gray-scaled. The conventional ranges for R, G, and B channel values are 0 to 255. Resulting gray color value computed as diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp index 9a5b07c14a..faf8de54c7 100644 --- a/modules/gapi/src/api/kernels_imgproc.cpp +++ b/modules/gapi/src/api/kernels_imgproc.cpp @@ -122,6 +122,48 @@ cv::GArray goodFeaturesToTrack(const GMat& image, int maxCorners, d useHarrisDetector, k); } +GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset) +{ + return imgproc::GFindContours::on(src, mode, method, offset); +} + +GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method) +{ + return imgproc::GFindContoursNoOffset::on(src, mode, method); +} + + +std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset) +{ + return imgproc::GFindContoursH::on(src, mode, method, offset); +} + +std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method) +{ + return imgproc::GFindContoursHNoOffset::on(src, mode, method); +} + +GOpaque boundingRect(const GMat& src) +{ + return imgproc::GBoundingRectMat::on(src); +} + +GOpaque boundingRect(const GArray& src) +{ + return imgproc::GBoundingRectVector32S::on(src); +} + +GOpaque boundingRect(const GArray& src) +{ + return imgproc::GBoundingRectVector32F::on(src); +} + GMat BGR2RGB(const GMat& src) { return imgproc::GBGR2RGB::on(src); diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp index a3c4e1b60f..9eca0f12f0 100644 --- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp +++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp @@ -221,6 +221,70 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures) } }; +GAPI_OCV_KERNEL(GCPUFindContours, cv::gapi::imgproc::GFindContours) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, const cv::Point& offset, + std::vector> &outConts) + { + cv::findContours(image, outConts, mode, method, offset); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursNoOffset, cv::gapi::imgproc::GFindContoursNoOffset) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + 
const cv::ContourApproximationModes method, + std::vector> &outConts) + { + cv::findContours(image, outConts, mode, method); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursH, cv::gapi::imgproc::GFindContoursH) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, const cv::Point& offset, + std::vector> &outConts, std::vector &outHier) + { + cv::findContours(image, outConts, outHier, mode, method, offset); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursHNoOffset, cv::gapi::imgproc::GFindContoursHNoOffset) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, + std::vector> &outConts, std::vector &outHier) + { + cv::findContours(image, outConts, outHier, mode, method); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectMat, cv::gapi::imgproc::GBoundingRectMat) +{ + static void run(const cv::Mat& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectVector32S, cv::gapi::imgproc::GBoundingRectVector32S) +{ + static void run(const std::vector& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVector32F) +{ + static void run(const std::vector& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB) { static void run(const cv::Mat& in, cv::Mat &out) @@ -496,8 +560,15 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels() , GCPUCanny , GCPUGoodFeatures , GCPUEqualizeHist + , GCPUFindContours + , GCPUFindContoursNoOffset + , GCPUFindContoursH + , GCPUFindContoursHNoOffset , GCPUBGR2RGB , GCPURGB2YUV + , GCPUBoundingRectMat + , GCPUBoundingRectVector32S + , GCPUBoundingRectVector32F , GCPUYUV2RGB , GCPUBGR2I420 , GCPURGB2I420 diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp index d562b306c2..b27da28c87 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp @@ -66,6 +66,21 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(GoodFeaturesTest, double,int,bool), 8, cmpF, fileName, type, maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursNoOffsetTest, + FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes, + cv::ContourApproximationModes), + 4, sz, type, mode, method) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursOffsetTest, <>, 0) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHNoOffsetTest, + FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes, + cv::ContourApproximationModes), + 4, sz, type, mode, method) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHOffsetTest, <>, 0) +GAPI_TEST_FIXTURE(BoundingRectMatTest, initMatrixRandU, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectMatVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectMatVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) diff --git 
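Each `GAPI_OCV_KERNEL` above is a thin wrapper: inputs arrive as plain OpenCV types, outputs are the trailing by-reference parameters, and the body forwards to the corresponding `cv::` call; the wrapper is then listed in `cv::gapi::imgproc::cpu::kernels()`. A self-contained sketch of the same three steps for a made-up kernel (everything named `*Example` here is hypothetical, not part of this patch):

```cpp
#include <vector>
#include <opencv2/imgproc.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/cpu/gcpukernel.hpp>

// 1) Interface: typed signature, textual ID, outMeta.
G_TYPED_KERNEL(GContourAreaExample, <cv::GOpaque<double>(cv::GArray<cv::Point>)>,
               "org.example.imgproc.shape.contourArea")
{
    static cv::GOpaqueDesc outMeta(cv::GArrayDesc) { return cv::empty_gopaque_desc(); }
};

// 2) CPU (OpenCV-backed) implementation: outputs go last, by reference.
GAPI_OCV_KERNEL(GCPUContourAreaExample, GContourAreaExample)
{
    static void run(const std::vector<cv::Point> &contour, double &out)
    {
        out = cv::contourArea(contour);
    }
};

// 3) Registration: expose the implementation through a kernel package.
cv::gapi::GKernelPackage contourAreaKernels()
{
    return cv::gapi::kernels<GCPUContourAreaExample>();
}
```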
a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp index c087733fa8..91e676c5e7 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp @@ -50,6 +50,27 @@ namespace rgb2yuyv(in_line_p, out_line_p, in.cols); } } + + // Draw random ellipses on given mat of given size and type + void initMatForFindingContours(cv::Mat& mat, const cv::Size& sz, const int type) + { + cv::RNG& rng = theRNG(); + mat = cv::Mat(sz, type, cv::Scalar::all(0)); + size_t numEllipses = rng.uniform(1, 10); + + for( size_t i = 0; i < numEllipses; i++ ) + { + cv::Point center; + cv::Size axes; + center.x = rng.uniform(0, sz.width); + center.y = rng.uniform(0, sz.height); + axes.width = rng.uniform(2, sz.width); + axes.height = rng.uniform(2, sz.height); + int color = rng.uniform(1, 256); + double angle = rng.uniform(0., 180.); + cv::ellipse(mat, center, axes, angle, 0., 360., color, 1, FILLED); + } + } } TEST_P(Filter2DTest, AccuracyTest) @@ -470,6 +491,267 @@ TEST_P(GoodFeaturesTest, AccuracyTest) } } +TEST_P(FindContoursNoOffsetTest, AccuracyTest) +{ + std::vector> outCtsOCV, outCtsGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, mode, method); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + cv::GArray> outCts; + outCts = cv::gapi::findContours(in, mode, method); + cv::GComputation c(GIn(in), GOut(outCts)); + c.apply(gin(in_mat1), gout(outCtsGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); +} + +TEST_P(FindContoursOffsetTest, AccuracyTest) +{ + const cv::Size sz(1280, 720); + const MatType2 type = CV_8UC1; + const cv::RetrievalModes mode = cv::RETR_EXTERNAL; + const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE; + const cv::Point offset(15, 15); + std::vector> outCtsOCV, outCtsGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, mode, method, offset); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + GOpaque gOffset; + cv::GArray> outCts; + outCts = cv::gapi::findContours(in, mode, method, gOffset); + cv::GComputation c(GIn(in, gOffset), GOut(outCts)); + c.apply(gin(in_mat1, offset), gout(outCtsGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); +} + +TEST_P(FindContoursHNoOffsetTest, AccuracyTest) +{ + std::vector> outCtsOCV, outCtsGAPI; + std::vector outHierOCV, outHierGAPI; + + initMatForFindingContours(in_mat1, sz, 
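The FindContours accuracy tests above do not compare the contour vectors element by element (contour order and starting points are implementation details); instead both results are rasterized with cv::fillPoly and the resulting masks are compared. A condensed sketch of that comparison (the helper name is illustrative):

```cpp
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Rasterize two contour sets onto equally-sized masks and require pixel-exact equality.
static bool sameContoursByMask(const std::vector<std::vector<cv::Point>> &a,
                               const std::vector<std::vector<cv::Point>> &b,
                               const cv::Size &sz)
{
    cv::Mat maskA = cv::Mat::zeros(sz, CV_8UC1);
    cv::Mat maskB = cv::Mat::zeros(sz, CV_8UC1);
    cv::fillPoly(maskA, a, cv::Scalar::all(1));
    cv::fillPoly(maskB, b, cv::Scalar::all(1));
    return cv::countNonZero(maskA != maskB) == 0;
}
```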
type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + cv::GArray> outCts; + cv::GArray outHier; + std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method); + cv::GComputation c(GIn(in), GOut(outCts, outHier)); + c.apply(gin(in_mat1), gout(outCtsGAPI, outHierGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); + + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + EXPECT_TRUE(AbsExactVector().to_compare_f()(outHierOCV, outHierGAPI)); +} + +TEST_P(FindContoursHOffsetTest, AccuracyTest) +{ + const cv::Size sz(1280, 720); + const MatType2 type = CV_8UC1; + const cv::RetrievalModes mode = cv::RETR_EXTERNAL; + const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE; + const cv::Point offset(15, 15); + std::vector> outCtsOCV, outCtsGAPI; + std::vector outHierOCV, outHierGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method, offset); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + GOpaque gOffset; + cv::GArray> outCts; + cv::GArray outHier; + std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method, gOffset); + cv::GComputation c(GIn(in, gOffset), GOut(outCts, outHier)); + c.apply(gin(in_mat1, offset), gout(outCtsGAPI, outHierGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); + + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + EXPECT_TRUE(AbsExactVector().to_compare_f()(outHierOCV, outHierGAPI)); +} + +TEST_P(BoundingRectMatTest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectMatVector32STest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorS(sz.width); + cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255)); + in_mat1 = cv::Mat(in_vectorS); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = 
cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectMatVector32FTest, AccuracyTest) +{ + cv::RNG& rng = theRNG(); + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorF(sz.width); + const int fscale = 256; // avoid bits near ULP, generate stable test input + for (int i = 0; i < sz.width; i++) + { + cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + in_vectorF.push_back(pt); + } + in_mat1 = cv::Mat(in_vectorF); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + + +TEST_P(BoundingRectVector32STest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorS(sz.width); + cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255)); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vectorS), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_vectorS); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectVector32FTest, AccuracyTest) +{ + cv::RNG& rng = theRNG(); + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorF(sz.width); + const int fscale = 256; // avoid bits near ULP, generate stable test input + for (int i = 0; i < sz.width; i++) + { + cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + in_vectorF.push_back(pt); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vectorF), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_vectorF); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + TEST_P(BGR2RGBTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// diff --git a/modules/gapi/test/common/gapi_tests_common.hpp b/modules/gapi/test/common/gapi_tests_common.hpp index bb045b83d1..948476fa10 100644 --- a/modules/gapi/test/common/gapi_tests_common.hpp +++ b/modules/gapi/test/common/gapi_tests_common.hpp @@ -463,6 +463,7 @@ struct 
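In the 32F bounding-rect tests above, random coordinates are snapped to a 1/256 grid (`fscale`) so the OpenCV and G-API code paths cannot disagree on values sitting right at a floating-point rounding boundary. A standalone sketch of that generator; note it reserves rather than pre-sizes the vector, so it holds only the generated points:

```cpp
#include <vector>
#include <opencv2/core.hpp>

// Generate 2D float points quantized to multiples of 1/256 for numerically stable tests.
static std::vector<cv::Point2f> makeStablePoints(int count)
{
    cv::RNG &rng = cv::theRNG();
    const int fscale = 256;                       // avoid bits near ULP
    std::vector<cv::Point2f> pts;
    pts.reserve(count);
    for (int i = 0; i < count; ++i)
    {
        pts.emplace_back(rng.uniform(0, 255 * fscale) / static_cast<float>(fscale),
                         rng.uniform(0, 255 * fscale) / static_cast<float>(fscale));
    }
    return pts;
}
```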
TestWithParamsSpecific : public TestWithParamsBase; using compare_scalar_f = std::function; +using compare_rect_f = std::function; template using compare_vector_f = std::function &a, @@ -489,6 +490,7 @@ private: using CompareMats = CompareF; using CompareScalars = CompareF; +using CompareRects = CompareF; template using CompareVectors = CompareF, std::vector>; @@ -535,6 +537,27 @@ struct WrappableScalar } }; +template +struct WrappableRect +{ + compare_rect_f to_compare_f() + { + T t = *static_cast(this); + return [t](const cv::Rect &a, const cv::Rect &b) + { + return t(a, b); + }; + } + + CompareRects to_compare_obj() + { + T t = *static_cast(this); + std::stringstream ss; + ss << t; + return CompareRects(to_compare_f(), ss.str()); + } +}; + template struct WrappableVector { @@ -719,13 +742,15 @@ public: double err_Inf = cv::norm(in1, in2, NORM_INF); if (err_Inf > _inf_tol) { - std::cout << "ToleranceColor error: err_Inf=" << err_Inf << " tolerance=" << _inf_tol << std::endl;; + std::cout << "ToleranceColor error: err_Inf=" << err_Inf + << " tolerance=" << _inf_tol << std::endl; return false; } double err = cv::norm(in1, in2, NORM_L1 | NORM_RELATIVE); if (err > _tol) { - std::cout << "ToleranceColor error: err=" << err << " tolerance=" << _tol << std::endl;; + std::cout << "ToleranceColor error: err=" << err + << " tolerance=" << _tol << std::endl; return false; } } @@ -749,7 +774,8 @@ public: double abs_err = std::abs(in1[0] - in2[0]) / std::max(1.0, std::abs(in2[0])); if (abs_err > _tol) { - std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << " tolerance=" << _tol << " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl;; + std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << " tolerance=" << _tol + << " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl; return false; } else @@ -765,6 +791,46 @@ private: double _tol; }; +class IoUToleranceRect : public WrappableRect +{ +public: + IoUToleranceRect(double tol) : _tol(tol) {} + bool operator() (const cv::Rect& in1, const cv::Rect& in2) const + { + // determine the (x, y)-coordinates of the intersection rectangle + int xA = max(in1.x, in2.x); + int yA = max(in1.y, in2.y); + int xB = min(in1.br().x, in2.br().x); + int yB = min(in1.br().y, in2.br().y); + // compute the area of intersection rectangle + int interArea = max(0, xB - xA) * max(0, yB - yA); + // compute the area of union rectangle + int unionArea = in1.area() + in2.area() - interArea; + + double iou = interArea / unionArea; + double err = 1 - iou; + if (err > _tol) + { + std::cout << "IoUToleranceRect error: err=" << err << " tolerance=" << _tol + << " in1.x=" << in1.x << " in2.x=" << in2.x + << " in1.y=" << in1.y << " in2.y=" << in2.y + << " in1.width=" << in1.width << " in2.width=" << in2.width + << " in1.height=" << in1.height << " in2.height=" << in2.height << std::endl; + return false; + } + else + { + return true; + } + } + friend std::ostream& operator<<(std::ostream& os, const IoUToleranceRect& obj) + { + return os << "IoUToleranceRect(" << std::to_string(obj._tol) << ")"; + } +private: + double _tol; +}; + template class AbsExactVector : public WrappableVector, Elem> { @@ -803,6 +869,11 @@ inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_sca return os << "compare_scalar_f"; } +inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_rect_f&) +{ + return os << "compare_rect_f"; +} + template inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_vector_f&) { @@ -849,6 +920,37 
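A note on `IoUToleranceRect` above: as written in this hunk, `interArea / unionArea` is an integer division, so `iou` can only come out as 0 or 1 and any non-zero tolerance effectively degenerates to an exact-equality check. A sketch of the same metric computed in floating point (the helper name is illustrative):

```cpp
#include <opencv2/core.hpp>

// Intersection-over-union of two axis-aligned rectangles, in double precision.
static double rectIoU(const cv::Rect &a, const cv::Rect &b)
{
    const int interArea = (a & b).area();                    // cv::Rect intersection
    const int unionArea = a.area() + b.area() - interArea;
    return unionArea > 0 ? static_cast<double>(interArea) / unionArea : 1.0;
}

// Usage in a comparator: return (1.0 - rectIoU(in1, in2)) <= tol;
```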
@@ inline std::ostream& operator<<(std::ostream& os, NormTypes op) return os; } +inline std::ostream& operator<<(std::ostream& os, RetrievalModes op) +{ +#define CASE(v) case RetrievalModes::v: os << #v; break + switch (op) + { + CASE(RETR_EXTERNAL); + CASE(RETR_LIST); + CASE(RETR_CCOMP); + CASE(RETR_TREE); + CASE(RETR_FLOODFILL); + default: GAPI_Assert(false && "unknown RetrievalModes value"); + } +#undef CASE + return os; +} + +inline std::ostream& operator<<(std::ostream& os, ContourApproximationModes op) +{ +#define CASE(v) case ContourApproximationModes::v: os << #v; break + switch (op) + { + CASE(CHAIN_APPROX_NONE); + CASE(CHAIN_APPROX_SIMPLE); + CASE(CHAIN_APPROX_TC89_L1); + CASE(CHAIN_APPROX_TC89_KCOS); + default: GAPI_Assert(false && "unknown ContourApproximationModes value"); + } +#undef CASE + return os; +} + inline std::ostream& operator<<(std::ostream& os, MorphTypes op) { #define CASE(v) case MorphTypes::v: os << #v; break diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp index 7cba6b05db..cea0e0da32 100644 --- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp +++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp @@ -265,6 +265,78 @@ INSTANTIATE_TEST_CASE_P(GoodFeaturesInternalTestCPU, GoodFeaturesTest, Values(3), Values(true))); +INSTANTIATE_TEST_CASE_P(FindContoursNoOffsetTestCPU, FindContoursNoOffsetTest, + Combine(Values(IMGPROC_CPU), + Values(cv::Size(1280, 720)), + Values(CV_8UC1), + Values(RETR_EXTERNAL), + Values(CHAIN_APPROX_NONE))); + +INSTANTIATE_TEST_CASE_P(FindContoursOffsetTestCPU, FindContoursOffsetTest, + Values(IMGPROC_CPU)); + +INSTANTIATE_TEST_CASE_P(FindContoursHNoOffsetTestCPU, FindContoursHNoOffsetTest, + Combine(Values(IMGPROC_CPU), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_8UC1), + Values(RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE), + Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS))); + +INSTANTIATE_TEST_CASE_P(FindContoursHNoOffset32STestCPU, FindContoursHNoOffsetTest, + Combine(Values(IMGPROC_CPU), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_32SC1), + Values(RETR_CCOMP, RETR_FLOODFILL), + Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS))); + +INSTANTIATE_TEST_CASE_P(FindContoursHOffsetTestCPU, FindContoursHOffsetTest, + Values(IMGPROC_CPU)); + +INSTANTIATE_TEST_CASE_P(BoundingRectMatTestCPU, BoundingRectMatTest, + Combine(Values( CV_8UC1 ), + Values(cv::Size(1280, 720), + cv::Size(640, 480), + cv::Size(128, 128)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(0).to_compare_obj()))); + +INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32STestCPU, BoundingRectMatVector32STest, + Combine(Values(-1), + Values(cv::Size(1280, 1), + cv::Size(128, 1)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(0).to_compare_obj()))); + + INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32FTestCPU, BoundingRectMatVector32FTest, + Combine(Values(-1), + Values(cv::Size(1280, 1), + cv::Size(128, 1)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(1e-5).to_compare_obj()))); + +INSTANTIATE_TEST_CASE_P(BoundingRectVector32STestCPU, BoundingRectVector32STest, + Combine(Values(-1), + Values(cv::Size(1280, 1), + cv::Size(128, 1)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(0).to_compare_obj()))); + + INSTANTIATE_TEST_CASE_P(BoundingRectVector32FTestCPU, BoundingRectVector32FTest, + Combine(Values(-1), + 
Values(cv::Size(1280, 1),
+                                                cv::Size(128, 1)),
+                                         Values(-1),
+                                         Values(IMGPROC_CPU),
+                                         Values(IoUToleranceRect(1e-5).to_compare_obj())));
+
 INSTANTIATE_TEST_CASE_P(BGR2RGBTestCPU, BGR2RGBTest,
                         Combine(Values(CV_8UC3),
                                 Values(cv::Size(1280, 720),