From aa08900ac8c3399534d57f55b7e7b3b1a6d8c132 Mon Sep 17 00:00:00 2001 From: Liubov Batanina Date: Tue, 21 Apr 2020 10:34:56 +0300 Subject: [PATCH 01/18] Supported TF concat 3d --- modules/dnn/src/tensorflow/tf_importer.cpp | 10 ++++++++++ modules/dnn/test/test_tf_importer.cpp | 1 + 2 files changed, 11 insertions(+) diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp index 0dd21770a4..6aadc57f40 100644 --- a/modules/dnn/src/tensorflow/tf_importer.cpp +++ b/modules/dnn/src/tensorflow/tf_importer.cpp @@ -46,6 +46,14 @@ static int toNCHW(int idx) else return (4 + idx) % 3 + 1; } +static int toNCDHW(int idx) +{ + CV_Assert(-5 <= idx && idx < 5); + if (idx == 0) return 0; + else if (idx > 0) return idx % 4 + 1; + else return (5 + idx) % 4 + 1; +} + // This values are used to indicate layer output's data layout where it's possible. enum DataLayout { @@ -1313,6 +1321,8 @@ void TFImporter::populateNet(Net dstNet) if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC) axis = toNCHW(axis); + else if (getDataLayout(name, data_layouts) == DATA_LAYOUT_NDHWC) + axis = toNCDHW(axis); layerParams.set("axis", axis); // input(0) or input(n-1) is concat_dim diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp index b71dfbc84a..8c487434a0 100644 --- a/modules/dnn/test/test_tf_importer.cpp +++ b/modules/dnn/test/test_tf_importer.cpp @@ -196,6 +196,7 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat) TEST_P(Test_TensorFlow_layers, concat_axis_1) { runTensorFlowNet("concat_axis_1"); + runTensorFlowNet("concat_3d"); } TEST_P(Test_TensorFlow_layers, batch_norm_1) From 1c1762d3f678f31ca402ade3b7ef9603c30eeaa7 Mon Sep 17 00:00:00 2001 From: Liubov Batanina Date: Wed, 22 Apr 2020 09:52:20 +0300 Subject: [PATCH 02/18] Skip myriad --- modules/dnn/test/test_tf_importer.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp index 8c487434a0..9e30bcafee 100644 --- a/modules/dnn/test/test_tf_importer.cpp +++ b/modules/dnn/test/test_tf_importer.cpp @@ -196,6 +196,8 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat) TEST_P(Test_TensorFlow_layers, concat_axis_1) { runTensorFlowNet("concat_axis_1"); + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); runTensorFlowNet("concat_3d"); } From 4bf94cb5d104d348583a393fa6fca0ebda5a6e7f Mon Sep 17 00:00:00 2001 From: Liubov Batanina Date: Sun, 26 Apr 2020 20:42:11 +0300 Subject: [PATCH 03/18] Fix test --- modules/dnn/test/test_tf_importer.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp index 9e30bcafee..8c4558bf4d 100644 --- a/modules/dnn/test/test_tf_importer.cpp +++ b/modules/dnn/test/test_tf_importer.cpp @@ -196,8 +196,20 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat) TEST_P(Test_TensorFlow_layers, concat_axis_1) { runTensorFlowNet("concat_axis_1"); - if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) - applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); +} + +TEST_P(Test_TensorFlow_layers, concat_3d) +{ + if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU) + { + if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16); + if (target == DNN_TARGET_OPENCL) 
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL); + } + + if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH || + backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + runTensorFlowNet("concat_3d"); } From 7093752cb581d9e7ec58d95eb8efabdce036d71b Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 27 Apr 2020 18:17:15 +0000 Subject: [PATCH 04/18] features2d: copy sift.simd.hpp --- modules/features2d/src/{sift.cpp => sift.simd.hpp} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename modules/features2d/src/{sift.cpp => sift.simd.hpp} (100%) diff --git a/modules/features2d/src/sift.cpp b/modules/features2d/src/sift.simd.hpp similarity index 100% rename from modules/features2d/src/sift.cpp rename to modules/features2d/src/sift.simd.hpp From 9926a93a781d86860bd0af001d8d7b31b2b5c62d Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 27 Apr 2020 18:18:16 +0000 Subject: [PATCH 05/18] features2d: copy sift.dispatch.cpp --- modules/features2d/src/{sift.cpp => sift.dispatch.cpp} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename modules/features2d/src/{sift.cpp => sift.dispatch.cpp} (100%) diff --git a/modules/features2d/src/sift.cpp b/modules/features2d/src/sift.dispatch.cpp similarity index 100% rename from modules/features2d/src/sift.cpp rename to modules/features2d/src/sift.dispatch.cpp From 74e4cfd1da06fe25240b725ed6c8948a13b5a722 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 1 May 2020 07:29:10 +0000 Subject: [PATCH 06/18] core(MatExpr): fix warning in case of e.s == (0, 0, 0, 0) --- modules/core/src/matrix_expressions.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/src/matrix_expressions.cpp b/modules/core/src/matrix_expressions.cpp index 58c99ed19a..44ac8f1713 100644 --- a/modules/core/src/matrix_expressions.cpp +++ b/modules/core/src/matrix_expressions.cpp @@ -1329,7 +1329,7 @@ void MatOp_AddEx::assign(const MatExpr& e, Mat& m, int _type) const } else if( e.s.isReal() && (dst.data != m.data || fabs(e.alpha) != 1)) { - if (e.a.channels() > 1) + if (e.a.channels() > 1 && e.s[0] != 0.0) CV_LOG_ONCE_WARNING(NULL, "OpenCV/MatExpr: processing of multi-channel arrays might be changed in the future: " "https://github.com/opencv/opencv/issues/16739"); e.a.convertTo(m, _type, e.alpha, e.s[0]); From ea04f9d12e7466d3f778dccf652539bca7dd9154 Mon Sep 17 00:00:00 2001 From: hn-88 Date: Sun, 3 May 2020 11:19:04 +0530 Subject: [PATCH 07/18] to make OpenCV compile on mingw32 added #define NO_DSHOW_STRSAFE --- modules/videoio/src/cap_dshow.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/videoio/src/cap_dshow.cpp b/modules/videoio/src/cap_dshow.cpp index 2da182c19b..e29cf38341 100644 --- a/modules/videoio/src/cap_dshow.cpp +++ b/modules/videoio/src/cap_dshow.cpp @@ -108,6 +108,7 @@ Thanks to: #include //Include Directshow stuff here so we don't worry about needing all the h files. 
+#define NO_DSHOW_STRSAFE #include "DShow.h" #include "strmif.h" #include "Aviriff.h" From 1f9713195b99b8c38296581704ca8289d5e434d4 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 28 Apr 2020 16:42:34 +0000 Subject: [PATCH 08/18] features2d(sift): enable runtime dispatching --- modules/features2d/CMakeLists.txt | 3 + modules/features2d/src/sift.dispatch.cpp | 678 +---------------------- modules/features2d/src/sift.simd.hpp | 495 +++-------------- 3 files changed, 101 insertions(+), 1075 deletions(-) diff --git a/modules/features2d/CMakeLists.txt b/modules/features2d/CMakeLists.txt index e92309db0f..1d29320a14 100644 --- a/modules/features2d/CMakeLists.txt +++ b/modules/features2d/CMakeLists.txt @@ -1,4 +1,7 @@ set(the_description "2D Features Framework") + +ocv_add_dispatched_file(sift SSE4_1 AVX2 AVX512_SKX) + set(debug_modules "") if(DEBUG_opencv_features2d) list(APPEND debug_modules opencv_highgui) diff --git a/modules/features2d/src/sift.dispatch.cpp b/modules/features2d/src/sift.dispatch.cpp index 81254ecbd8..b9ab704804 100644 --- a/modules/features2d/src/sift.dispatch.cpp +++ b/modules/features2d/src/sift.dispatch.cpp @@ -70,14 +70,13 @@ \**********************************************************************************************/ #include "precomp.hpp" -#include -#include #include - #include -namespace cv -{ +#include "sift.simd.hpp" +#include "sift.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content + +namespace cv { /*! SIFT implementation. @@ -127,55 +126,6 @@ Ptr SIFT::create( int _nfeatures, int _nOctaveLayers, return makePtr(_nfeatures, _nOctaveLayers, _contrastThreshold, _edgeThreshold, _sigma); } -/******************************* Defs and macros *****************************/ - -// default width of descriptor histogram array -static const int SIFT_DESCR_WIDTH = 4; - -// default number of bins per histogram in descriptor array -static const int SIFT_DESCR_HIST_BINS = 8; - -// assumed gaussian blur for input image -static const float SIFT_INIT_SIGMA = 0.5f; - -// width of border in which to ignore keypoints -static const int SIFT_IMG_BORDER = 5; - -// maximum steps of keypoint interpolation before failure -static const int SIFT_MAX_INTERP_STEPS = 5; - -// default number of bins in histogram for orientation assignment -static const int SIFT_ORI_HIST_BINS = 36; - -// determines gaussian sigma for orientation assignment -static const float SIFT_ORI_SIG_FCTR = 1.5f; - -// determines the radius of the region used in orientation assignment -static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR; - -// orientation magnitude relative to max that results in new feature -static const float SIFT_ORI_PEAK_RATIO = 0.8f; - -// determines the size of a single descriptor orientation histogram -static const float SIFT_DESCR_SCL_FCTR = 3.f; - -// threshold on magnitude of elements of descriptor vector -static const float SIFT_DESCR_MAG_THR = 0.2f; - -// factor used to convert floating-point descriptor to unsigned char -static const float SIFT_INT_DESCR_FCTR = 512.f; - -#define DoG_TYPE_SHORT 0 -#if DoG_TYPE_SHORT -// intermediate type used for DoG pyramids -typedef short sift_wt; -static const int SIFT_FIXPT_SCALE = 48; -#else -// intermediate type used for DoG pyramids -typedef float sift_wt; -static const int SIFT_FIXPT_SCALE = 1; -#endif - static inline void unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale) { @@ -311,249 +261,6 @@ void SIFT_Impl::buildDoGPyramid( const std::vector& gpyr, 
std::vector& parallel_for_(Range(0, nOctaves * (nOctaveLayers + 2)), buildDoGPyramidComputer(nOctaveLayers, gpyr, dogpyr)); } -// Computes a gradient orientation histogram at a specified pixel -static float calcOrientationHist( const Mat& img, Point pt, int radius, - float sigma, float* hist, int n ) -{ - CV_TRACE_FUNCTION(); - - int i, j, k, len = (radius*2+1)*(radius*2+1); - - float expf_scale = -1.f/(2.f * sigma * sigma); - AutoBuffer buf(len*4 + n+4); - float *X = buf.data(), *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len; - float* temphist = W + len + 2; - - for( i = 0; i < n; i++ ) - temphist[i] = 0.f; - - for( i = -radius, k = 0; i <= radius; i++ ) - { - int y = pt.y + i; - if( y <= 0 || y >= img.rows - 1 ) - continue; - for( j = -radius; j <= radius; j++ ) - { - int x = pt.x + j; - if( x <= 0 || x >= img.cols - 1 ) - continue; - - float dx = (float)(img.at(y, x+1) - img.at(y, x-1)); - float dy = (float)(img.at(y-1, x) - img.at(y+1, x)); - - X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale; - k++; - } - } - - len = k; - - // compute gradient values, orientations and the weights over the pixel neighborhood - cv::hal::exp32f(W, W, len); - cv::hal::fastAtan2(Y, X, Ori, len, true); - cv::hal::magnitude32f(X, Y, Mag, len); - - k = 0; -#if CV_AVX2 - { - __m256 __nd360 = _mm256_set1_ps(n/360.f); - __m256i __n = _mm256_set1_epi32(n); - int CV_DECL_ALIGNED(32) bin_buf[8]; - float CV_DECL_ALIGNED(32) w_mul_mag_buf[8]; - for ( ; k <= len - 8; k+=8 ) - { - __m256i __bin = _mm256_cvtps_epi32(_mm256_mul_ps(__nd360, _mm256_loadu_ps(&Ori[k]))); - - __bin = _mm256_sub_epi32(__bin, _mm256_andnot_si256(_mm256_cmpgt_epi32(__n, __bin), __n)); - __bin = _mm256_add_epi32(__bin, _mm256_and_si256(__n, _mm256_cmpgt_epi32(_mm256_setzero_si256(), __bin))); - - __m256 __w_mul_mag = _mm256_mul_ps(_mm256_loadu_ps(&W[k]), _mm256_loadu_ps(&Mag[k])); - - _mm256_store_si256((__m256i *) bin_buf, __bin); - _mm256_store_ps(w_mul_mag_buf, __w_mul_mag); - - temphist[bin_buf[0]] += w_mul_mag_buf[0]; - temphist[bin_buf[1]] += w_mul_mag_buf[1]; - temphist[bin_buf[2]] += w_mul_mag_buf[2]; - temphist[bin_buf[3]] += w_mul_mag_buf[3]; - temphist[bin_buf[4]] += w_mul_mag_buf[4]; - temphist[bin_buf[5]] += w_mul_mag_buf[5]; - temphist[bin_buf[6]] += w_mul_mag_buf[6]; - temphist[bin_buf[7]] += w_mul_mag_buf[7]; - } - } -#endif - for( ; k < len; k++ ) - { - int bin = cvRound((n/360.f)*Ori[k]); - if( bin >= n ) - bin -= n; - if( bin < 0 ) - bin += n; - temphist[bin] += W[k]*Mag[k]; - } - - // smooth the histogram - temphist[-1] = temphist[n-1]; - temphist[-2] = temphist[n-2]; - temphist[n] = temphist[0]; - temphist[n+1] = temphist[1]; - - i = 0; -#if CV_AVX2 - { - __m256 __d_1_16 = _mm256_set1_ps(1.f/16.f); - __m256 __d_4_16 = _mm256_set1_ps(4.f/16.f); - __m256 __d_6_16 = _mm256_set1_ps(6.f/16.f); - for( ; i <= n - 8; i+=8 ) - { -#if CV_FMA3 - __m256 __hist = _mm256_fmadd_ps( - _mm256_add_ps(_mm256_loadu_ps(&temphist[i-2]), _mm256_loadu_ps(&temphist[i+2])), - __d_1_16, - _mm256_fmadd_ps( - _mm256_add_ps(_mm256_loadu_ps(&temphist[i-1]), _mm256_loadu_ps(&temphist[i+1])), - __d_4_16, - _mm256_mul_ps(_mm256_loadu_ps(&temphist[i]), __d_6_16))); -#else - __m256 __hist = _mm256_add_ps( - _mm256_mul_ps( - _mm256_add_ps(_mm256_loadu_ps(&temphist[i-2]), _mm256_loadu_ps(&temphist[i+2])), - __d_1_16), - _mm256_add_ps( - _mm256_mul_ps( - _mm256_add_ps(_mm256_loadu_ps(&temphist[i-1]), _mm256_loadu_ps(&temphist[i+1])), - __d_4_16), - _mm256_mul_ps(_mm256_loadu_ps(&temphist[i]), __d_6_16))); -#endif - _mm256_storeu_ps(&hist[i], __hist); - 
} - } -#endif - for( ; i < n; i++ ) - { - hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) + - (temphist[i-1] + temphist[i+1])*(4.f/16.f) + - temphist[i]*(6.f/16.f); - } - - float maxval = hist[0]; - for( i = 1; i < n; i++ ) - maxval = std::max(maxval, hist[i]); - - return maxval; -} - - -// -// Interpolates a scale-space extremum's location and scale to subpixel -// accuracy to form an image feature. Rejects features with low contrast. -// Based on Section 4 of Lowe's paper. -static bool adjustLocalExtrema( const std::vector& dog_pyr, KeyPoint& kpt, int octv, - int& layer, int& r, int& c, int nOctaveLayers, - float contrastThreshold, float edgeThreshold, float sigma ) -{ - CV_TRACE_FUNCTION(); - - const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE); - const float deriv_scale = img_scale*0.5f; - const float second_deriv_scale = img_scale; - const float cross_deriv_scale = img_scale*0.25f; - - float xi=0, xr=0, xc=0, contr=0; - int i = 0; - - for( ; i < SIFT_MAX_INTERP_STEPS; i++ ) - { - int idx = octv*(nOctaveLayers+2) + layer; - const Mat& img = dog_pyr[idx]; - const Mat& prev = dog_pyr[idx-1]; - const Mat& next = dog_pyr[idx+1]; - - Vec3f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, - (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, - (next.at(r, c) - prev.at(r, c))*deriv_scale); - - float v2 = (float)img.at(r, c)*2; - float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; - float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; - float dss = (next.at(r, c) + prev.at(r, c) - v2)*second_deriv_scale; - float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - - img.at(r-1, c+1) + img.at(r-1, c-1))*cross_deriv_scale; - float dxs = (next.at(r, c+1) - next.at(r, c-1) - - prev.at(r, c+1) + prev.at(r, c-1))*cross_deriv_scale; - float dys = (next.at(r+1, c) - next.at(r-1, c) - - prev.at(r+1, c) + prev.at(r-1, c))*cross_deriv_scale; - - Matx33f H(dxx, dxy, dxs, - dxy, dyy, dys, - dxs, dys, dss); - - Vec3f X = H.solve(dD, DECOMP_LU); - - xi = -X[2]; - xr = -X[1]; - xc = -X[0]; - - if( std::abs(xi) < 0.5f && std::abs(xr) < 0.5f && std::abs(xc) < 0.5f ) - break; - - if( std::abs(xi) > (float)(INT_MAX/3) || - std::abs(xr) > (float)(INT_MAX/3) || - std::abs(xc) > (float)(INT_MAX/3) ) - return false; - - c += cvRound(xc); - r += cvRound(xr); - layer += cvRound(xi); - - if( layer < 1 || layer > nOctaveLayers || - c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER || - r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER ) - return false; - } - - // ensure convergence of interpolation - if( i >= SIFT_MAX_INTERP_STEPS ) - return false; - - { - int idx = octv*(nOctaveLayers+2) + layer; - const Mat& img = dog_pyr[idx]; - const Mat& prev = dog_pyr[idx-1]; - const Mat& next = dog_pyr[idx+1]; - Matx31f dD((img.at(r, c+1) - img.at(r, c-1))*deriv_scale, - (img.at(r+1, c) - img.at(r-1, c))*deriv_scale, - (next.at(r, c) - prev.at(r, c))*deriv_scale); - float t = dD.dot(Matx31f(xc, xr, xi)); - - contr = img.at(r, c)*img_scale + t * 0.5f; - if( std::abs( contr ) * nOctaveLayers < contrastThreshold ) - return false; - - // principal curvatures are computed using the trace and det of Hessian - float v2 = img.at(r, c)*2.f; - float dxx = (img.at(r, c+1) + img.at(r, c-1) - v2)*second_deriv_scale; - float dyy = (img.at(r+1, c) + img.at(r-1, c) - v2)*second_deriv_scale; - float dxy = (img.at(r+1, c+1) - img.at(r+1, c-1) - - img.at(r-1, c+1) + img.at(r-1, c-1)) * cross_deriv_scale; - float tr = dxx + dyy; - float det = dxx * dyy - dxy * dxy; - - if( det <= 0 || tr*tr*edgeThreshold >= 
(edgeThreshold + 1)*(edgeThreshold + 1)*det ) - return false; - } - - kpt.pt.x = (c + xc) * (1 << octv); - kpt.pt.y = (r + xr) * (1 << octv); - kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16); - kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2; - kpt.response = std::abs(contr); - - return true; -} - - class findScaleSpaceExtremaComputer : public ParallelLoopBody { public: @@ -589,84 +296,10 @@ public: { CV_TRACE_FUNCTION(); - const int begin = range.start; - const int end = range.end; + std::vector& kpts = tls_kpts_struct.getRef(); - static const int n = SIFT_ORI_HIST_BINS; - float hist[n]; - - const Mat& img = dog_pyr[idx]; - const Mat& prev = dog_pyr[idx-1]; - const Mat& next = dog_pyr[idx+1]; - - std::vector *tls_kpts = tls_kpts_struct.get(); - - KeyPoint kpt; - for( int r = begin; r < end; r++) - { - const sift_wt* currptr = img.ptr(r); - const sift_wt* prevptr = prev.ptr(r); - const sift_wt* nextptr = next.ptr(r); - - for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++) - { - sift_wt val = currptr[c]; - - // find local extrema with pixel accuracy - if( std::abs(val) > threshold && - ((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] && - val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] && - val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] && - val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] && - val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] && - val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] && - val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] && - val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] && - val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) || - (val < 0 && val <= currptr[c-1] && val <= currptr[c+1] && - val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] && - val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] && - val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] && - val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] && - val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] && - val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] && - val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] && - val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1]))) - { - CV_TRACE_REGION("pixel_candidate"); - - int r1 = r, c1 = c, layer = i; - if( !adjustLocalExtrema(dog_pyr, kpt, o, layer, r1, c1, - nOctaveLayers, (float)contrastThreshold, - (float)edgeThreshold, (float)sigma) ) - continue; - float scl_octv = kpt.size*0.5f/(1 << o); - float omax = calcOrientationHist(gauss_pyr[o*(nOctaveLayers+3) + layer], - Point(c1, r1), - cvRound(SIFT_ORI_RADIUS * scl_octv), - SIFT_ORI_SIG_FCTR * scl_octv, - hist, n); - float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO); - for( int j = 0; j < n; j++ ) - { - int l = j > 0 ? j - 1 : n - 1; - int r2 = j < n-1 ? j + 1 : 0; - - if( hist[j] > hist[l] && hist[j] > hist[r2] && hist[j] >= mag_thr ) - { - float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]); - bin = bin < 0 ? n + bin : bin >= n ? 
bin - n : bin; - kpt.angle = 360.f - (float)((360.f/n) * bin); - if(std::abs(kpt.angle - 360.f) < FLT_EPSILON) - kpt.angle = 0.f; - { - tls_kpts->push_back(kpt); - } - } - } - } - } - } + CV_CPU_DISPATCH(findScaleSpaceExtrema, (o, i, threshold, idx, step, cols, nOctaveLayers, contrastThreshold, edgeThreshold, sigma, gauss_pyr, dog_pyr, kpts, range), + CV_CPU_DISPATCH_MODES_ALL); } private: int o, i; @@ -721,299 +354,16 @@ void SIFT_Impl::findScaleSpaceExtrema( const std::vector& gauss_pyr, const } -static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float scl, - int d, int n, float* dst ) +static +void calcSIFTDescriptor( + const Mat& img, Point2f ptf, float ori, float scl, + int d, int n, float* dst +) { CV_TRACE_FUNCTION(); - Point pt(cvRound(ptf.x), cvRound(ptf.y)); - float cos_t = cosf(ori*(float)(CV_PI/180)); - float sin_t = sinf(ori*(float)(CV_PI/180)); - float bins_per_rad = n / 360.f; - float exp_scale = -1.f/(d * d * 0.5f); - float hist_width = SIFT_DESCR_SCL_FCTR * scl; - int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f); - // Clip the radius to the diagonal of the image to avoid autobuffer too large exception - radius = std::min(radius, (int) sqrt(((double) img.cols)*img.cols + ((double) img.rows)*img.rows)); - cos_t /= hist_width; - sin_t /= hist_width; - - int i, j, k, len = (radius*2+1)*(radius*2+1), histlen = (d+2)*(d+2)*(n+2); - int rows = img.rows, cols = img.cols; - - AutoBuffer buf(len*6 + histlen); - float *X = buf.data(), *Y = X + len, *Mag = Y, *Ori = Mag + len, *W = Ori + len; - float *RBin = W + len, *CBin = RBin + len, *hist = CBin + len; - - for( i = 0; i < d+2; i++ ) - { - for( j = 0; j < d+2; j++ ) - for( k = 0; k < n+2; k++ ) - hist[(i*(d+2) + j)*(n+2) + k] = 0.; - } - - for( i = -radius, k = 0; i <= radius; i++ ) - for( j = -radius; j <= radius; j++ ) - { - // Calculate sample's histogram array coords rotated relative to ori. - // Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e. - // r_rot = 1.5) have full weight placed in row 1 after interpolation. 
- float c_rot = j * cos_t - i * sin_t; - float r_rot = j * sin_t + i * cos_t; - float rbin = r_rot + d/2 - 0.5f; - float cbin = c_rot + d/2 - 0.5f; - int r = pt.y + i, c = pt.x + j; - - if( rbin > -1 && rbin < d && cbin > -1 && cbin < d && - r > 0 && r < rows - 1 && c > 0 && c < cols - 1 ) - { - float dx = (float)(img.at(r, c+1) - img.at(r, c-1)); - float dy = (float)(img.at(r-1, c) - img.at(r+1, c)); - X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin; - W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale; - k++; - } - } - - len = k; - cv::hal::fastAtan2(Y, X, Ori, len, true); - cv::hal::magnitude32f(X, Y, Mag, len); - cv::hal::exp32f(W, W, len); - - k = 0; -#if CV_AVX2 - { - int CV_DECL_ALIGNED(32) idx_buf[8]; - float CV_DECL_ALIGNED(32) rco_buf[64]; - const __m256 __ori = _mm256_set1_ps(ori); - const __m256 __bins_per_rad = _mm256_set1_ps(bins_per_rad); - const __m256i __n = _mm256_set1_epi32(n); - for( ; k <= len - 8; k+=8 ) - { - __m256 __rbin = _mm256_loadu_ps(&RBin[k]); - __m256 __cbin = _mm256_loadu_ps(&CBin[k]); - __m256 __obin = _mm256_mul_ps(_mm256_sub_ps(_mm256_loadu_ps(&Ori[k]), __ori), __bins_per_rad); - __m256 __mag = _mm256_mul_ps(_mm256_loadu_ps(&Mag[k]), _mm256_loadu_ps(&W[k])); - - __m256 __r0 = _mm256_floor_ps(__rbin); - __rbin = _mm256_sub_ps(__rbin, __r0); - __m256 __c0 = _mm256_floor_ps(__cbin); - __cbin = _mm256_sub_ps(__cbin, __c0); - __m256 __o0 = _mm256_floor_ps(__obin); - __obin = _mm256_sub_ps(__obin, __o0); - - __m256i __o0i = _mm256_cvtps_epi32(__o0); - __o0i = _mm256_add_epi32(__o0i, _mm256_and_si256(__n, _mm256_cmpgt_epi32(_mm256_setzero_si256(), __o0i))); - __o0i = _mm256_sub_epi32(__o0i, _mm256_andnot_si256(_mm256_cmpgt_epi32(__n, __o0i), __n)); - - __m256 __v_r1 = _mm256_mul_ps(__mag, __rbin); - __m256 __v_r0 = _mm256_sub_ps(__mag, __v_r1); - - __m256 __v_rc11 = _mm256_mul_ps(__v_r1, __cbin); - __m256 __v_rc10 = _mm256_sub_ps(__v_r1, __v_rc11); - - __m256 __v_rc01 = _mm256_mul_ps(__v_r0, __cbin); - __m256 __v_rc00 = _mm256_sub_ps(__v_r0, __v_rc01); - - __m256 __v_rco111 = _mm256_mul_ps(__v_rc11, __obin); - __m256 __v_rco110 = _mm256_sub_ps(__v_rc11, __v_rco111); - - __m256 __v_rco101 = _mm256_mul_ps(__v_rc10, __obin); - __m256 __v_rco100 = _mm256_sub_ps(__v_rc10, __v_rco101); - - __m256 __v_rco011 = _mm256_mul_ps(__v_rc01, __obin); - __m256 __v_rco010 = _mm256_sub_ps(__v_rc01, __v_rco011); - - __m256 __v_rco001 = _mm256_mul_ps(__v_rc00, __obin); - __m256 __v_rco000 = _mm256_sub_ps(__v_rc00, __v_rco001); - - __m256i __one = _mm256_set1_epi32(1); - __m256i __idx = _mm256_add_epi32( - _mm256_mullo_epi32( - _mm256_add_epi32( - _mm256_mullo_epi32(_mm256_add_epi32(_mm256_cvtps_epi32(__r0), __one), _mm256_set1_epi32(d + 2)), - _mm256_add_epi32(_mm256_cvtps_epi32(__c0), __one)), - _mm256_set1_epi32(n + 2)), - __o0i); - - _mm256_store_si256((__m256i *)idx_buf, __idx); - - _mm256_store_ps(&(rco_buf[0]), __v_rco000); - _mm256_store_ps(&(rco_buf[8]), __v_rco001); - _mm256_store_ps(&(rco_buf[16]), __v_rco010); - _mm256_store_ps(&(rco_buf[24]), __v_rco011); - _mm256_store_ps(&(rco_buf[32]), __v_rco100); - _mm256_store_ps(&(rco_buf[40]), __v_rco101); - _mm256_store_ps(&(rco_buf[48]), __v_rco110); - _mm256_store_ps(&(rco_buf[56]), __v_rco111); - #define HIST_SUM_HELPER(id) \ - hist[idx_buf[(id)]] += rco_buf[(id)]; \ - hist[idx_buf[(id)]+1] += rco_buf[8 + (id)]; \ - hist[idx_buf[(id)]+(n+2)] += rco_buf[16 + (id)]; \ - hist[idx_buf[(id)]+(n+3)] += rco_buf[24 + (id)]; \ - hist[idx_buf[(id)]+(d+2)*(n+2)] += rco_buf[32 + (id)]; \ - hist[idx_buf[(id)]+(d+2)*(n+2)+1] += 
rco_buf[40 + (id)]; \ - hist[idx_buf[(id)]+(d+3)*(n+2)] += rco_buf[48 + (id)]; \ - hist[idx_buf[(id)]+(d+3)*(n+2)+1] += rco_buf[56 + (id)]; - - HIST_SUM_HELPER(0); - HIST_SUM_HELPER(1); - HIST_SUM_HELPER(2); - HIST_SUM_HELPER(3); - HIST_SUM_HELPER(4); - HIST_SUM_HELPER(5); - HIST_SUM_HELPER(6); - HIST_SUM_HELPER(7); - - #undef HIST_SUM_HELPER - } - } -#endif - for( ; k < len; k++ ) - { - float rbin = RBin[k], cbin = CBin[k]; - float obin = (Ori[k] - ori)*bins_per_rad; - float mag = Mag[k]*W[k]; - - int r0 = cvFloor( rbin ); - int c0 = cvFloor( cbin ); - int o0 = cvFloor( obin ); - rbin -= r0; - cbin -= c0; - obin -= o0; - - if( o0 < 0 ) - o0 += n; - if( o0 >= n ) - o0 -= n; - - // histogram update using tri-linear interpolation - float v_r1 = mag*rbin, v_r0 = mag - v_r1; - float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11; - float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01; - float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111; - float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101; - float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011; - float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001; - - int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0; - hist[idx] += v_rco000; - hist[idx+1] += v_rco001; - hist[idx+(n+2)] += v_rco010; - hist[idx+(n+3)] += v_rco011; - hist[idx+(d+2)*(n+2)] += v_rco100; - hist[idx+(d+2)*(n+2)+1] += v_rco101; - hist[idx+(d+3)*(n+2)] += v_rco110; - hist[idx+(d+3)*(n+2)+1] += v_rco111; - } - - // finalize histogram, since the orientation histograms are circular - for( i = 0; i < d; i++ ) - for( j = 0; j < d; j++ ) - { - int idx = ((i+1)*(d+2) + (j+1))*(n+2); - hist[idx] += hist[idx+n]; - hist[idx+1] += hist[idx+n+1]; - for( k = 0; k < n; k++ ) - dst[(i*d + j)*n + k] = hist[idx+k]; - } - // copy histogram to the descriptor, - // apply hysteresis thresholding - // and scale the result, so that it can be easily converted - // to byte array - float nrm2 = 0; - len = d*d*n; - k = 0; -#if CV_AVX2 - { - float CV_DECL_ALIGNED(32) nrm2_buf[8]; - __m256 __nrm2 = _mm256_setzero_ps(); - __m256 __dst; - for( ; k <= len - 8; k += 8 ) - { - __dst = _mm256_loadu_ps(&dst[k]); -#if CV_FMA3 - __nrm2 = _mm256_fmadd_ps(__dst, __dst, __nrm2); -#else - __nrm2 = _mm256_add_ps(__nrm2, _mm256_mul_ps(__dst, __dst)); -#endif - } - _mm256_store_ps(nrm2_buf, __nrm2); - nrm2 = nrm2_buf[0] + nrm2_buf[1] + nrm2_buf[2] + nrm2_buf[3] + - nrm2_buf[4] + nrm2_buf[5] + nrm2_buf[6] + nrm2_buf[7]; - } -#endif - for( ; k < len; k++ ) - nrm2 += dst[k]*dst[k]; - - float thr = std::sqrt(nrm2)*SIFT_DESCR_MAG_THR; - - i = 0, nrm2 = 0; -#if 0 //CV_AVX2 - // This code cannot be enabled because it sums nrm2 in a different order, - // thus producing slightly different results - { - float CV_DECL_ALIGNED(32) nrm2_buf[8]; - __m256 __dst; - __m256 __nrm2 = _mm256_setzero_ps(); - __m256 __thr = _mm256_set1_ps(thr); - for( ; i <= len - 8; i += 8 ) - { - __dst = _mm256_loadu_ps(&dst[i]); - __dst = _mm256_min_ps(__dst, __thr); - _mm256_storeu_ps(&dst[i], __dst); -#if CV_FMA3 - __nrm2 = _mm256_fmadd_ps(__dst, __dst, __nrm2); -#else - __nrm2 = _mm256_add_ps(__nrm2, _mm256_mul_ps(__dst, __dst)); -#endif - } - _mm256_store_ps(nrm2_buf, __nrm2); - nrm2 = nrm2_buf[0] + nrm2_buf[1] + nrm2_buf[2] + nrm2_buf[3] + - nrm2_buf[4] + nrm2_buf[5] + nrm2_buf[6] + nrm2_buf[7]; - } -#endif - for( ; i < len; i++ ) - { - float val = std::min(dst[i], thr); - dst[i] = val; - nrm2 += val*val; - } - nrm2 = SIFT_INT_DESCR_FCTR/std::max(std::sqrt(nrm2), FLT_EPSILON); - -#if 1 - k = 0; -#if CV_AVX2 - { - __m256 __dst; - __m256 __min = 
_mm256_setzero_ps(); - __m256 __max = _mm256_set1_ps(255.0f); // max of uchar - __m256 __nrm2 = _mm256_set1_ps(nrm2); - for( k = 0; k <= len - 8; k+=8 ) - { - __dst = _mm256_loadu_ps(&dst[k]); - __dst = _mm256_min_ps(_mm256_max_ps(_mm256_round_ps(_mm256_mul_ps(__dst, __nrm2), _MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC), __min), __max); - _mm256_storeu_ps(&dst[k], __dst); - } - } -#endif - for( ; k < len; k++ ) - { - dst[k] = saturate_cast(dst[k]*nrm2); - } -#else - float nrm1 = 0; - for( k = 0; k < len; k++ ) - { - dst[k] *= nrm2; - nrm1 += dst[k]; - } - nrm1 = 1.f/std::max(nrm1, FLT_EPSILON); - for( k = 0; k < len; k++ ) - { - dst[k] = std::sqrt(dst[k] * nrm1);//saturate_cast(std::sqrt(dst[k] * nrm1)*SIFT_INT_DESCR_FCTR); - } -#endif + CV_CPU_DISPATCH(calcSIFTDescriptor, (img, ptf, ori, scl, d, n, dst), + CV_CPU_DISPATCH_MODES_ALL); } class calcDescriptorsComputer : public ParallelLoopBody diff --git a/modules/features2d/src/sift.simd.hpp b/modules/features2d/src/sift.simd.hpp index 81254ecbd8..c0f9b5b1e2 100644 --- a/modules/features2d/src/sift.simd.hpp +++ b/modules/features2d/src/sift.simd.hpp @@ -70,63 +70,13 @@ \**********************************************************************************************/ #include "precomp.hpp" -#include -#include + #include +#include "opencv2/core/hal/intrin.hpp" -#include - -namespace cv -{ - -/*! - SIFT implementation. - - The class implements SIFT algorithm by D. Lowe. - */ -class SIFT_Impl : public SIFT -{ -public: - explicit SIFT_Impl( int nfeatures = 0, int nOctaveLayers = 3, - double contrastThreshold = 0.04, double edgeThreshold = 10, - double sigma = 1.6); - - //! returns the descriptor size in floats (128) - int descriptorSize() const CV_OVERRIDE; - - //! returns the descriptor type - int descriptorType() const CV_OVERRIDE; - - //! returns the default norm type - int defaultNorm() const CV_OVERRIDE; - - //! finds the keypoints and computes descriptors for them using SIFT algorithm. - //! 
Optionally it can compute descriptors for the user-provided keypoints - void detectAndCompute(InputArray img, InputArray mask, - std::vector& keypoints, - OutputArray descriptors, - bool useProvidedKeypoints = false) CV_OVERRIDE; - - void buildGaussianPyramid( const Mat& base, std::vector& pyr, int nOctaves ) const; - void buildDoGPyramid( const std::vector& pyr, std::vector& dogpyr ) const; - void findScaleSpaceExtrema( const std::vector& gauss_pyr, const std::vector& dog_pyr, - std::vector& keypoints ) const; - -protected: - CV_PROP_RW int nfeatures; - CV_PROP_RW int nOctaveLayers; - CV_PROP_RW double contrastThreshold; - CV_PROP_RW double edgeThreshold; - CV_PROP_RW double sigma; -}; - -Ptr SIFT::create( int _nfeatures, int _nOctaveLayers, - double _contrastThreshold, double _edgeThreshold, double _sigma ) -{ - CV_TRACE_FUNCTION(); - return makePtr(_nfeatures, _nOctaveLayers, _contrastThreshold, _edgeThreshold, _sigma); -} +namespace cv { +#if !defined(CV_CPU_DISPATCH_MODE) || !defined(CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY) /******************************* Defs and macros *****************************/ // default width of descriptor histogram array @@ -151,7 +101,7 @@ static const int SIFT_ORI_HIST_BINS = 36; static const float SIFT_ORI_SIG_FCTR = 1.5f; // determines the radius of the region used in orientation assignment -static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR; +static const float SIFT_ORI_RADIUS = 4.5f; // 3 * SIFT_ORI_SIG_FCTR; // orientation magnitude relative to max that results in new feature static const float SIFT_ORI_PEAK_RATIO = 0.8f; @@ -176,144 +126,41 @@ typedef float sift_wt; static const int SIFT_FIXPT_SCALE = 1; #endif -static inline void -unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale) -{ - octave = kpt.octave & 255; - layer = (kpt.octave >> 8) & 255; - octave = octave < 128 ? octave : (-128 | octave); - scale = octave >= 0 ? 
1.f/(1 << octave) : (float)(1 << -octave); -} - -static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma ) -{ - CV_TRACE_FUNCTION(); - - Mat gray, gray_fpt; - if( img.channels() == 3 || img.channels() == 4 ) - { - cvtColor(img, gray, COLOR_BGR2GRAY); - gray.convertTo(gray_fpt, DataType::type, SIFT_FIXPT_SCALE, 0); - } - else - img.convertTo(gray_fpt, DataType::type, SIFT_FIXPT_SCALE, 0); - - float sig_diff; - - if( doubleImageSize ) - { - sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) ); - Mat dbl; -#if DoG_TYPE_SHORT - resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR_EXACT); -#else - resize(gray_fpt, dbl, Size(gray_fpt.cols*2, gray_fpt.rows*2), 0, 0, INTER_LINEAR); -#endif - Mat result; - GaussianBlur(dbl, result, Size(), sig_diff, sig_diff); - return result; - } - else - { - sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) ); - Mat result; - GaussianBlur(gray_fpt, result, Size(), sig_diff, sig_diff); - return result; - } -} +#endif // definitions and macros -void SIFT_Impl::buildGaussianPyramid( const Mat& base, std::vector& pyr, int nOctaves ) const -{ - CV_TRACE_FUNCTION(); +CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN - std::vector sig(nOctaveLayers + 3); - pyr.resize(nOctaves*(nOctaveLayers + 3)); +void findScaleSpaceExtrema( + int octave, + int layer, + int threshold, + int idx, + int step, + int cols, + int nOctaveLayers, + double contrastThreshold, + double edgeThreshold, + double sigma, + const std::vector& gauss_pyr, + const std::vector& dog_pyr, + std::vector& kpts, + const cv::Range& range); - // precompute Gaussian sigmas using the following formula: - // \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2 - sig[0] = sigma; - double k = std::pow( 2., 1. 
/ nOctaveLayers ); - for( int i = 1; i < nOctaveLayers + 3; i++ ) - { - double sig_prev = std::pow(k, (double)(i-1))*sigma; - double sig_total = sig_prev*k; - sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev); - } - - for( int o = 0; o < nOctaves; o++ ) - { - for( int i = 0; i < nOctaveLayers + 3; i++ ) - { - Mat& dst = pyr[o*(nOctaveLayers + 3) + i]; - if( o == 0 && i == 0 ) - dst = base; - // base of new octave is halved image from end of previous octave - else if( i == 0 ) - { - const Mat& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers]; - resize(src, dst, Size(src.cols/2, src.rows/2), - 0, 0, INTER_NEAREST); - } - else - { - const Mat& src = pyr[o*(nOctaveLayers + 3) + i-1]; - GaussianBlur(src, dst, Size(), sig[i], sig[i]); - } - } - } -} +void calcSIFTDescriptor( + const Mat& img, Point2f ptf, float ori, float scl, + int d, int n, float* dst +); -class buildDoGPyramidComputer : public ParallelLoopBody -{ -public: - buildDoGPyramidComputer( - int _nOctaveLayers, - const std::vector& _gpyr, - std::vector& _dogpyr) - : nOctaveLayers(_nOctaveLayers), - gpyr(_gpyr), - dogpyr(_dogpyr) { } - - void operator()( const cv::Range& range ) const CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - - const int begin = range.start; - const int end = range.end; - - for( int a = begin; a < end; a++ ) - { - const int o = a / (nOctaveLayers + 2); - const int i = a % (nOctaveLayers + 2); - - const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i]; - const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1]; - Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i]; - subtract(src2, src1, dst, noArray(), DataType::type); - } - } - -private: - int nOctaveLayers; - const std::vector& gpyr; - std::vector& dogpyr; -}; - -void SIFT_Impl::buildDoGPyramid( const std::vector& gpyr, std::vector& dogpyr ) const -{ - CV_TRACE_FUNCTION(); - - int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3); - dogpyr.resize( nOctaves*(nOctaveLayers + 2) ); - - parallel_for_(Range(0, nOctaves * (nOctaveLayers + 2)), buildDoGPyramidComputer(nOctaveLayers, gpyr, dogpyr)); -} +#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY // Computes a gradient orientation histogram at a specified pixel -static float calcOrientationHist( const Mat& img, Point pt, int radius, - float sigma, float* hist, int n ) +static +float calcOrientationHist( + const Mat& img, Point pt, int radius, + float sigma, float* hist, int n +) { CV_TRACE_FUNCTION(); @@ -449,9 +296,12 @@ static float calcOrientationHist( const Mat& img, Point pt, int radius, // Interpolates a scale-space extremum's location and scale to subpixel // accuracy to form an image feature. Rejects features with low contrast. // Based on Section 4 of Lowe's paper. 
-static bool adjustLocalExtrema( const std::vector& dog_pyr, KeyPoint& kpt, int octv, - int& layer, int& r, int& c, int nOctaveLayers, - float contrastThreshold, float edgeThreshold, float sigma ) +static +bool adjustLocalExtrema( + const std::vector& dog_pyr, KeyPoint& kpt, int octv, + int& layer, int& r, int& c, int nOctaveLayers, + float contrastThreshold, float edgeThreshold, float sigma +) { CV_TRACE_FUNCTION(); @@ -553,11 +403,12 @@ static bool adjustLocalExtrema( const std::vector& dog_pyr, KeyPoint& kpt, return true; } +namespace { -class findScaleSpaceExtremaComputer : public ParallelLoopBody +class findScaleSpaceExtremaT { public: - findScaleSpaceExtremaComputer( + findScaleSpaceExtremaT( int _o, int _i, int _threshold, @@ -570,7 +421,7 @@ public: double _sigma, const std::vector& _gauss_pyr, const std::vector& _dog_pyr, - TLSData > &_tls_kpts_struct) + std::vector& kpts) : o(_o), i(_i), @@ -584,8 +435,11 @@ public: sigma(_sigma), gauss_pyr(_gauss_pyr), dog_pyr(_dog_pyr), - tls_kpts_struct(_tls_kpts_struct) { } - void operator()( const cv::Range& range ) const CV_OVERRIDE + kpts_(kpts) + { + // nothing + } + void process(const cv::Range& range) { CV_TRACE_FUNCTION(); @@ -593,15 +447,12 @@ public: const int end = range.end; static const int n = SIFT_ORI_HIST_BINS; - float hist[n]; + float CV_DECL_ALIGNED(CV_SIMD_WIDTH) hist[n]; const Mat& img = dog_pyr[idx]; const Mat& prev = dog_pyr[idx-1]; const Mat& next = dog_pyr[idx+1]; - std::vector *tls_kpts = tls_kpts_struct.get(); - - KeyPoint kpt; for( int r = begin; r < end; r++) { const sift_wt* currptr = img.ptr(r); @@ -635,6 +486,7 @@ public: { CV_TRACE_REGION("pixel_candidate"); + KeyPoint kpt; int r1 = r, c1 = c, layer = i; if( !adjustLocalExtrema(dog_pyr, kpt, o, layer, r1, c1, nOctaveLayers, (float)contrastThreshold, @@ -659,9 +511,8 @@ public: kpt.angle = 360.f - (float)((360.f/n) * bin); if(std::abs(kpt.angle - 360.f) < FLT_EPSILON) kpt.angle = 0.f; - { - tls_kpts->push_back(kpt); - } + + kpts_.push_back(kpt); } } } @@ -678,51 +529,42 @@ private: double sigma; const std::vector& gauss_pyr; const std::vector& dog_pyr; - TLSData > &tls_kpts_struct; + std::vector& kpts_; }; -// -// Detects features at extrema in DoG scale space. Bad features are discarded -// based on contrast and ratio of principal curvatures. 
-void SIFT_Impl::findScaleSpaceExtrema( const std::vector& gauss_pyr, const std::vector& dog_pyr, - std::vector& keypoints ) const +} // namespace + + +void findScaleSpaceExtrema( + int octave, + int layer, + int threshold, + int idx, + int step, + int cols, + int nOctaveLayers, + double contrastThreshold, + double edgeThreshold, + double sigma, + const std::vector& gauss_pyr, + const std::vector& dog_pyr, + std::vector& kpts, + const cv::Range& range) { CV_TRACE_FUNCTION(); - const int nOctaves = (int)gauss_pyr.size()/(nOctaveLayers + 3); - const int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE); - - keypoints.clear(); - TLSDataAccumulator > tls_kpts_struct; - - for( int o = 0; o < nOctaves; o++ ) - for( int i = 1; i <= nOctaveLayers; i++ ) - { - const int idx = o*(nOctaveLayers+2)+i; - const Mat& img = dog_pyr[idx]; - const int step = (int)img.step1(); - const int rows = img.rows, cols = img.cols; - - parallel_for_(Range(SIFT_IMG_BORDER, rows-SIFT_IMG_BORDER), - findScaleSpaceExtremaComputer( - o, i, threshold, idx, step, cols, - nOctaveLayers, - contrastThreshold, - edgeThreshold, - sigma, - gauss_pyr, dog_pyr, tls_kpts_struct)); - } - - std::vector*> kpt_vecs; - tls_kpts_struct.gather(kpt_vecs); - for (size_t i = 0; i < kpt_vecs.size(); ++i) { - keypoints.insert(keypoints.end(), kpt_vecs[i]->begin(), kpt_vecs[i]->end()); - } + findScaleSpaceExtremaT(octave, layer, threshold, idx, + step, cols, + nOctaveLayers, contrastThreshold, edgeThreshold, sigma, + gauss_pyr, dog_pyr, + kpts) + .process(range); } - -static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float scl, - int d, int n, float* dst ) +void calcSIFTDescriptor( + const Mat& img, Point2f ptf, float ori, float scl, + int d, int n, float* dst +) { CV_TRACE_FUNCTION(); @@ -734,7 +576,7 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc float hist_width = SIFT_DESCR_SCL_FCTR * scl; int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f); // Clip the radius to the diagonal of the image to avoid autobuffer too large exception - radius = std::min(radius, (int) sqrt(((double) img.cols)*img.cols + ((double) img.rows)*img.rows)); + radius = std::min(radius, (int)std::sqrt(((double) img.cols)*img.cols + ((double) img.rows)*img.rows)); cos_t /= hist_width; sin_t /= hist_width; @@ -1016,175 +858,6 @@ static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float sc #endif } -class calcDescriptorsComputer : public ParallelLoopBody -{ -public: - calcDescriptorsComputer(const std::vector& _gpyr, - const std::vector& _keypoints, - Mat& _descriptors, - int _nOctaveLayers, - int _firstOctave) - : gpyr(_gpyr), - keypoints(_keypoints), - descriptors(_descriptors), - nOctaveLayers(_nOctaveLayers), - firstOctave(_firstOctave) { } - - void operator()( const cv::Range& range ) const CV_OVERRIDE - { - CV_TRACE_FUNCTION(); - - const int begin = range.start; - const int end = range.end; - - static const int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS; - - for ( int i = begin; i= firstOctave && layer <= nOctaveLayers+2); - float size=kpt.size*scale; - Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale); - const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer]; - - float angle = 360.f - kpt.angle; - if(std::abs(angle - 360.f) < FLT_EPSILON) - angle = 0.f; - calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr((int)i)); - } - } -private: - const std::vector& gpyr; - const std::vector& keypoints; - Mat& 
descriptors; - int nOctaveLayers; - int firstOctave; -}; - -static void calcDescriptors(const std::vector& gpyr, const std::vector& keypoints, - Mat& descriptors, int nOctaveLayers, int firstOctave ) -{ - CV_TRACE_FUNCTION(); - parallel_for_(Range(0, static_cast(keypoints.size())), calcDescriptorsComputer(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave)); -} - -////////////////////////////////////////////////////////////////////////////////////////// - -SIFT_Impl::SIFT_Impl( int _nfeatures, int _nOctaveLayers, - double _contrastThreshold, double _edgeThreshold, double _sigma ) - : nfeatures(_nfeatures), nOctaveLayers(_nOctaveLayers), - contrastThreshold(_contrastThreshold), edgeThreshold(_edgeThreshold), sigma(_sigma) -{ -} - -int SIFT_Impl::descriptorSize() const -{ - return SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS; -} - -int SIFT_Impl::descriptorType() const -{ - return CV_32F; -} - -int SIFT_Impl::defaultNorm() const -{ - return NORM_L2; -} - - -void SIFT_Impl::detectAndCompute(InputArray _image, InputArray _mask, - std::vector& keypoints, - OutputArray _descriptors, - bool useProvidedKeypoints) -{ - CV_TRACE_FUNCTION(); - - int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0; - Mat image = _image.getMat(), mask = _mask.getMat(); - - if( image.empty() || image.depth() != CV_8U ) - CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" ); - - if( !mask.empty() && mask.type() != CV_8UC1 ) - CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" ); - - if( useProvidedKeypoints ) - { - firstOctave = 0; - int maxOctave = INT_MIN; - for( size_t i = 0; i < keypoints.size(); i++ ) - { - int octave, layer; - float scale; - unpackOctave(keypoints[i], octave, layer, scale); - firstOctave = std::min(firstOctave, octave); - maxOctave = std::max(maxOctave, octave); - actualNLayers = std::max(actualNLayers, layer-2); - } - - firstOctave = std::min(firstOctave, 0); - CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers ); - actualNOctaves = maxOctave - firstOctave + 1; - } - - Mat base = createInitialImage(image, firstOctave < 0, (float)sigma); - std::vector gpyr; - int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) 
- 2) - firstOctave; - - //double t, tf = getTickFrequency(); - //t = (double)getTickCount(); - buildGaussianPyramid(base, gpyr, nOctaves); - - //t = (double)getTickCount() - t; - //printf("pyramid construction time: %g\n", t*1000./tf); - - if( !useProvidedKeypoints ) - { - std::vector dogpyr; - buildDoGPyramid(gpyr, dogpyr); - //t = (double)getTickCount(); - findScaleSpaceExtrema(gpyr, dogpyr, keypoints); - KeyPointsFilter::removeDuplicatedSorted( keypoints ); - - if( nfeatures > 0 ) - KeyPointsFilter::retainBest(keypoints, nfeatures); - //t = (double)getTickCount() - t; - //printf("keypoint detection time: %g\n", t*1000./tf); - - if( firstOctave < 0 ) - for( size_t i = 0; i < keypoints.size(); i++ ) - { - KeyPoint& kpt = keypoints[i]; - float scale = 1.f/(float)(1 << -firstOctave); - kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255); - kpt.pt *= scale; - kpt.size *= scale; - } - - if( !mask.empty() ) - KeyPointsFilter::runByPixelsMask( keypoints, mask ); - } - else - { - // filter keypoints by mask - //KeyPointsFilter::runByPixelsMask( keypoints, mask ); - } - - if( _descriptors.needed() ) - { - //t = (double)getTickCount(); - int dsize = descriptorSize(); - _descriptors.create((int)keypoints.size(), dsize, CV_32F); - Mat descriptors = _descriptors.getMat(); - - calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave); - //t = (double)getTickCount() - t; - //printf("descriptor extraction time: %g\n", t*1000./tf); - } -} - -} +#endif +CV_CPU_OPTIMIZATION_NAMESPACE_END +} // namespace From 1622e7cc90e7d51cc6f0598eb15efebf1902ca69 Mon Sep 17 00:00:00 2001 From: Daniel Mallia Date: Mon, 4 May 2020 23:20:52 -0400 Subject: [PATCH 09/18] Update NumPy links --- doc/py_tutorials/py_setup/py_intro/py_intro.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/py_tutorials/py_setup/py_intro/py_intro.markdown b/doc/py_tutorials/py_setup/py_intro/py_intro.markdown index c8041bb2c5..487ba72ee7 100644 --- a/doc/py_tutorials/py_setup/py_intro/py_intro.markdown +++ b/doc/py_tutorials/py_setup/py_intro/py_intro.markdown @@ -80,7 +80,7 @@ Additional Resources -------------------- -# A Quick guide to Python - [A Byte of Python](http://swaroopch.com/notes/python/) -2. [Basic Numpy Tutorials](http://wiki.scipy.org/Tentative_NumPy_Tutorial) -3. [Numpy Examples List](http://wiki.scipy.org/Numpy_Example_List) +2. [NumPy Quickstart tutorial](https://numpy.org/devdocs/user/quickstart.html) +3. [NumPy Reference](https://numpy.org/devdocs/reference/index.html#reference) 4. [OpenCV Documentation](http://docs.opencv.org/) 5. 
[OpenCV Forum](http://answers.opencv.org/questions/) From 64b0757758771c97ef6c8ec1e355453d58bacab3 Mon Sep 17 00:00:00 2001 From: Daniel Mallia Date: Mon, 4 May 2020 23:50:22 -0400 Subject: [PATCH 10/18] Update Supported formula commands - MathJax link --- .../documenting_opencv/documentation_tutorial.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown b/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown index 2493116dc6..058f392afe 100644 --- a/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown +++ b/doc/tutorials/introduction/documenting_opencv/documentation_tutorial.markdown @@ -690,6 +690,6 @@ References {#tutorial_documentation_refs} [Documenting basics]: http://www.doxygen.nl/manual/docblocks.html [Markdown support]: http://www.doxygen.nl/manual/markdown.html [Formulas support]: http://www.doxygen.nl/manual/formulas.html -[Supported formula commands]: http://docs.mathjax.org/en/latest/tex.html#supported-latex-commands +[Supported formula commands]: http://docs.mathjax.org/en/latest/input/tex/macros/index.html [Command reference]: http://www.doxygen.nl/manual/commands.html [Google Scholar]: http://scholar.google.ru/ From c934d9c3cca6b90fe164886356f30983e2318f35 Mon Sep 17 00:00:00 2001 From: Daniel Mallia Date: Tue, 5 May 2020 18:05:58 -0400 Subject: [PATCH 11/18] Update imgproc tutorials table of content Languages fields --- doc/tutorials/imgproc/table_of_content_imgproc.markdown | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/tutorials/imgproc/table_of_content_imgproc.markdown b/doc/tutorials/imgproc/table_of_content_imgproc.markdown index bd04b57717..fd5b1ca11f 100644 --- a/doc/tutorials/imgproc/table_of_content_imgproc.markdown +++ b/doc/tutorials/imgproc/table_of_content_imgproc.markdown @@ -15,6 +15,8 @@ In this section you will learn about the image processing (manipulation) functio - @subpage tutorial_random_generator_and_text + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Ana Huamán @@ -333,7 +335,7 @@ In this section you will learn about the image processing (manipulation) functio - @subpage tutorial_anisotropic_image_segmentation_by_a_gst - *Languages:* C++ + *Languages:* C++, Python *Compatibility:* \> OpenCV 2.0 From 7c17695be4b2624bfb9ac276eba86b117b92e812 Mon Sep 17 00:00:00 2001 From: Alexander Smorkalov Date: Wed, 6 May 2020 16:53:41 +0300 Subject: [PATCH 12/18] Added Java and C++ regression test for estimateNewCameraMatrixForUndistortRectify. 
--- .../calib3d/misc/java/test/Calib3dTest.java | 27 +++++++++++ modules/calib3d/test/test_fisheye.cpp | 45 +++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/modules/calib3d/misc/java/test/Calib3dTest.java b/modules/calib3d/misc/java/test/Calib3dTest.java index b474fc7442..ae5377efc5 100644 --- a/modules/calib3d/misc/java/test/Calib3dTest.java +++ b/modules/calib3d/misc/java/test/Calib3dTest.java @@ -682,4 +682,31 @@ public class Calib3dTest extends OpenCVTestCase { assertMatEqual(truth_rvec, rvecs.get(0), 10 * EPS); assertMatEqual(truth_tvec, tvecs.get(0), 1000 * EPS); } + + public void testEstimateNewCameraMatrixForUndistortRectify() { + Mat K = new Mat().eye(3, 3, CvType.CV_64FC1); + Mat K_new = new Mat().eye(3, 3, CvType.CV_64FC1); + Mat K_new_truth = new Mat().eye(3, 3, CvType.CV_64FC1); + Mat D = new Mat().zeros(4, 1, CvType.CV_64FC1); + + K.put(0,0,600.4447738238429); + K.put(1,1,578.9929805505851); + K.put(0,2,992.0642578801213); + K.put(1,2,549.2682624212172); + + D.put(0,0,-0.05090103223466704); + D.put(1,0,0.030944413642173308); + D.put(2,0,-0.021509225493198905); + D.put(3,0,0.0043378096628297145); + + K_new_truth.put(0,0, 387.4809086880343); + K_new_truth.put(0,2, 1036.669802754649); + K_new_truth.put(1,1, 373.6375700303157); + K_new_truth.put(1,2, 538.8373261247601); + + Calib3d.fisheye_estimateNewCameraMatrixForUndistortRectify(K,D,new Size(1920,1080), + new Mat().eye(3, 3, CvType.CV_64F), K_new, 0.0, new Size(1920,1080)); + + assertMatEqual(K_new, K_new_truth, EPS); + } } diff --git a/modules/calib3d/test/test_fisheye.cpp b/modules/calib3d/test/test_fisheye.cpp index 9dfb3ea876..eedc2fa4fe 100644 --- a/modules/calib3d/test/test_fisheye.cpp +++ b/modules/calib3d/test/test_fisheye.cpp @@ -656,6 +656,51 @@ TEST_F(fisheyeTest, CalibrationWithDifferentPointsNumber) cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6)); } +TEST_F(fisheyeTest, estimateNewCameraMatrixForUndistortRectify) +{ + cv::Size size(1920, 1080); + + cv::Mat K_fullhd(3, 3, cv::DataType::type); + K_fullhd.at(0, 0) = 600.44477382; + K_fullhd.at(0, 1) = 0.0; + K_fullhd.at(0, 2) = 992.06425788; + + K_fullhd.at(1, 0) = 0.0; + K_fullhd.at(1, 1) = 578.99298055; + K_fullhd.at(1, 2) = 549.26826242; + + K_fullhd.at(2, 0) = 0.0; + K_fullhd.at(2, 1) = 0.0; + K_fullhd.at(2, 2) = 1.0; + + cv::Mat K_new_truth(3, 3, cv::DataType::type); + + K_new_truth.at(0, 0) = 387.4809086880343; + K_new_truth.at(0, 1) = 0.0; + K_new_truth.at(0, 2) = 1036.669802754649; + + K_new_truth.at(1, 0) = 0.0; + K_new_truth.at(1, 1) = 373.6375700303157; + K_new_truth.at(1, 2) = 538.8373261247601; + + K_new_truth.at(2, 0) = 0.0; + K_new_truth.at(2, 1) = 0.0; + K_new_truth.at(2, 2) = 1.0; + + cv::Mat D_fullhd(4, 1, cv::DataType::type); + D_fullhd.at(0, 0) = -0.05090103223466704; + D_fullhd.at(1, 0) = 0.030944413642173308; + D_fullhd.at(2, 0) = -0.021509225493198905; + D_fullhd.at(3, 0) = 0.0043378096628297145; + cv::Mat E = cv::Mat::eye(3, 3, cv::DataType::type); + + cv::Mat K_new(3, 3, cv::DataType::type); + + cv::fisheye::estimateNewCameraMatrixForUndistortRectify(K_fullhd, D_fullhd, size, E, K_new, 0.0, size); + + EXPECT_MAT_NEAR(K_new, K_new_truth, 1e-6); +} + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// fisheyeTest:: From 4e789635c9df3343c2332c0c474797c6a688fef1 Mon Sep 17 00:00:00 2001 From: Daniel Mallia Date: Fri, 8 May 2020 14:22:30 -0400 Subject: [PATCH 13/18] Update tutorials tables of content for several modules --- 
.../calib3d/table_of_content_calib3d.markdown | 8 ++++++++ doc/tutorials/core/table_of_content_core.markdown | 10 ++++++++++ doc/tutorials/dnn/table_of_content_dnn.markdown | 14 ++++++++++++++ .../table_of_content_features2d.markdown | 6 ++++++ doc/tutorials/gpu/table_of_content_gpu.markdown | 4 ++++ .../imgcodecs/table_of_content_highgui.markdown | 2 ++ doc/tutorials/ios/table_of_content_ios.markdown | 6 ++++++ .../stitching/table_of_content_stitching.markdown | 2 ++ .../videoio/table_of_content_videoio.markdown | 8 ++++++++ doc/tutorials/viz/table_of_content_viz.markdown | 10 ++++++++++ 10 files changed, 70 insertions(+) diff --git a/doc/tutorials/calib3d/table_of_content_calib3d.markdown b/doc/tutorials/calib3d/table_of_content_calib3d.markdown index d99a4db59e..61679f12a0 100644 --- a/doc/tutorials/calib3d/table_of_content_calib3d.markdown +++ b/doc/tutorials/calib3d/table_of_content_calib3d.markdown @@ -5,6 +5,8 @@ Although we get most of our images in a 2D format they do come from a 3D world. - @subpage tutorial_camera_calibration_pattern + *Languages:* Python + *Compatibility:* \> OpenCV 2.0 *Author:* Laurent Berger @@ -13,6 +15,8 @@ Although we get most of our images in a 2D format they do come from a 3D world. - @subpage tutorial_camera_calibration_square_chess + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Victor Eruhimov @@ -21,6 +25,8 @@ Although we get most of our images in a 2D format they do come from a 3D world. - @subpage tutorial_camera_calibration + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor @@ -31,6 +37,8 @@ Although we get most of our images in a 2D format they do come from a 3D world. - @subpage tutorial_real_time_pose + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Edgar Riba diff --git a/doc/tutorials/core/table_of_content_core.markdown b/doc/tutorials/core/table_of_content_core.markdown index d775d8f0ee..2c8a38b098 100644 --- a/doc/tutorials/core/table_of_content_core.markdown +++ b/doc/tutorials/core/table_of_content_core.markdown @@ -6,6 +6,8 @@ understanding how to manipulate the images on a pixel level. - @subpage tutorial_mat_the_basic_image_container + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor @@ -15,6 +17,8 @@ understanding how to manipulate the images on a pixel level. - @subpage tutorial_how_to_scan_images + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor @@ -75,6 +79,8 @@ understanding how to manipulate the images on a pixel level. - @subpage tutorial_file_input_output_with_xml_yml + *Languages:* C++, Python + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor @@ -84,6 +90,8 @@ understanding how to manipulate the images on a pixel level. - @subpage tutorial_interoperability_with_OpenCV_1 + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor @@ -95,6 +103,8 @@ understanding how to manipulate the images on a pixel level. - @subpage tutorial_how_to_use_OpenCV_parallel_for_ + *Languages:* C++ + *Compatibility:* \>= OpenCV 2.4.3 You will see how to use the OpenCV parallel_for_ to easily parallelize your code. 
diff --git a/doc/tutorials/dnn/table_of_content_dnn.markdown b/doc/tutorials/dnn/table_of_content_dnn.markdown index 9a52f100db..85609ff660 100644 --- a/doc/tutorials/dnn/table_of_content_dnn.markdown +++ b/doc/tutorials/dnn/table_of_content_dnn.markdown @@ -3,6 +3,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn} - @subpage tutorial_dnn_googlenet + *Languages:* C++ + *Compatibility:* \> OpenCV 3.3 *Author:* Vitaliy Lyudvichenko @@ -11,6 +13,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn} - @subpage tutorial_dnn_halide + *Languages:* Halide + *Compatibility:* \> OpenCV 3.3 *Author:* Dmitry Kurtaev @@ -19,6 +23,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn} - @subpage tutorial_dnn_halide_scheduling + *Languages:* Halide + *Compatibility:* \> OpenCV 3.3 *Author:* Dmitry Kurtaev @@ -27,6 +33,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn} - @subpage tutorial_dnn_android + *Languages:* Java + *Compatibility:* \> OpenCV 3.3 *Author:* Dmitry Kurtaev @@ -35,6 +43,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn} - @subpage tutorial_dnn_yolo + *Languages:* C++, Python + *Compatibility:* \> OpenCV 3.3.1 *Author:* Alessandro de Oliveira Faria @@ -43,6 +53,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn} - @subpage tutorial_dnn_javascript + *Languages:* JavaScript + *Compatibility:* \> OpenCV 3.3.1 *Author:* Dmitry Kurtaev @@ -51,6 +63,8 @@ Deep Neural Networks (dnn module) {#tutorial_table_of_content_dnn} - @subpage tutorial_dnn_custom_layers + *Languages:* C++, Python + *Compatibility:* \> OpenCV 3.4.1 *Author:* Dmitry Kurtaev diff --git a/doc/tutorials/features2d/table_of_content_features2d.markdown b/doc/tutorials/features2d/table_of_content_features2d.markdown index 1f941f9d01..f42e2a571c 100644 --- a/doc/tutorials/features2d/table_of_content_features2d.markdown +++ b/doc/tutorials/features2d/table_of_content_features2d.markdown @@ -89,6 +89,8 @@ OpenCV. - @subpage tutorial_detection_of_planar_objects + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Victor Eruhimov @@ -108,6 +110,8 @@ OpenCV. - @subpage tutorial_akaze_tracking + *Languages:* C++ + *Compatibility:* \> OpenCV 3.0 *Author:* Fedor Morozov @@ -116,6 +120,8 @@ OpenCV. - @subpage tutorial_homography + *Languages:* C++, Java, Python + *Compatibility:* \> OpenCV 3.0 This tutorial will explain the basic concepts of the homography with some diff --git a/doc/tutorials/gpu/table_of_content_gpu.markdown b/doc/tutorials/gpu/table_of_content_gpu.markdown index 163f5e3b3f..1ea374335c 100644 --- a/doc/tutorials/gpu/table_of_content_gpu.markdown +++ b/doc/tutorials/gpu/table_of_content_gpu.markdown @@ -6,6 +6,8 @@ run the OpenCV algorithms. - @subpage tutorial_gpu_basics_similarity + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor @@ -16,6 +18,8 @@ run the OpenCV algorithms. - @subpage tutorial_gpu_thrust_interop + *Languages:* C++ + *Compatibility:* \>= OpenCV 3.0 This tutorial will show you how to wrap a GpuMat into a thrust iterator in order to be able to diff --git a/doc/tutorials/imgcodecs/table_of_content_highgui.markdown b/doc/tutorials/imgcodecs/table_of_content_highgui.markdown index e78e8276f4..b63b7b00ce 100644 --- a/doc/tutorials/imgcodecs/table_of_content_highgui.markdown +++ b/doc/tutorials/imgcodecs/table_of_content_highgui.markdown @@ -5,6 +5,8 @@ This section contains tutorials about how to read/save your image files. 
- @subpage tutorial_raster_io_gdal + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Marvin Smith diff --git a/doc/tutorials/ios/table_of_content_ios.markdown b/doc/tutorials/ios/table_of_content_ios.markdown index 70ee57375f..4031c6c80b 100644 --- a/doc/tutorials/ios/table_of_content_ios.markdown +++ b/doc/tutorials/ios/table_of_content_ios.markdown @@ -3,6 +3,8 @@ OpenCV iOS {#tutorial_table_of_content_ios} - @subpage tutorial_hello + *Languages:* Objective-C++ + *Compatibility:* \> OpenCV 2.4.3 *Author:* Charu Hans @@ -11,6 +13,8 @@ OpenCV iOS {#tutorial_table_of_content_ios} - @subpage tutorial_image_manipulation + *Languages:* Objective-C++ + *Compatibility:* \> OpenCV 2.4.3 *Author:* Charu Hans @@ -19,6 +23,8 @@ OpenCV iOS {#tutorial_table_of_content_ios} - @subpage tutorial_video_processing + *Languages:* Objective-C++ + *Compatibility:* \> OpenCV 2.4.3 *Author:* Eduard Feicho diff --git a/doc/tutorials/stitching/table_of_content_stitching.markdown b/doc/tutorials/stitching/table_of_content_stitching.markdown index d85571cd7e..d5972f4343 100644 --- a/doc/tutorials/stitching/table_of_content_stitching.markdown +++ b/doc/tutorials/stitching/table_of_content_stitching.markdown @@ -7,6 +7,8 @@ create a photo panorama or you want to stitch scans. - @subpage tutorial_stitcher + *Languages:* C++ + *Compatibility:* \>= OpenCV 3.2 *Author:* Jiri Horner diff --git a/doc/tutorials/videoio/table_of_content_videoio.markdown b/doc/tutorials/videoio/table_of_content_videoio.markdown index 4f62765115..b27726bd87 100644 --- a/doc/tutorials/videoio/table_of_content_videoio.markdown +++ b/doc/tutorials/videoio/table_of_content_videoio.markdown @@ -5,6 +5,8 @@ This section contains tutorials about how to read/save your video files. - @subpage tutorial_video_input_psnr_ssim + *Languages:* C++, Python + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor @@ -14,10 +16,16 @@ This section contains tutorials about how to read/save your video files. 
- @subpage tutorial_video_write + *Languages:* C++ + *Compatibility:* \> OpenCV 2.0 *Author:* Bernát Gábor - @subpage tutorial_kinect_openni + *Languages:* C++ + - @subpage tutorial_intelperc + + *Languages:* C++ \ No newline at end of file diff --git a/doc/tutorials/viz/table_of_content_viz.markdown b/doc/tutorials/viz/table_of_content_viz.markdown index fae1396de4..bcbebd153b 100644 --- a/doc/tutorials/viz/table_of_content_viz.markdown +++ b/doc/tutorials/viz/table_of_content_viz.markdown @@ -3,6 +3,8 @@ OpenCV Viz {#tutorial_table_of_content_viz} - @subpage tutorial_launching_viz + *Languages:* C++ + *Compatibility:* \> OpenCV 3.0.0 *Author:* Ozan Tonkal @@ -11,6 +13,8 @@ OpenCV Viz {#tutorial_table_of_content_viz} - @subpage tutorial_widget_pose + *Languages:* C++ + *Compatibility:* \> OpenCV 3.0.0 *Author:* Ozan Tonkal @@ -19,6 +23,8 @@ OpenCV Viz {#tutorial_table_of_content_viz} - @subpage tutorial_transformations + *Languages:* C++ + *Compatibility:* \> OpenCV 3.0.0 *Author:* Ozan Tonkal @@ -27,6 +33,8 @@ OpenCV Viz {#tutorial_table_of_content_viz} - @subpage tutorial_creating_widgets + *Languages:* C++ + *Compatibility:* \> OpenCV 3.0.0 *Author:* Ozan Tonkal @@ -35,6 +43,8 @@ OpenCV Viz {#tutorial_table_of_content_viz} - @subpage tutorial_histo3D + *Languages:* C++ + *Compatibility:* \> OpenCV 3.0.0 *Author:* Laurent Berger From b43da8aa5decc2e1cfef7673e73c5eb5f907611e Mon Sep 17 00:00:00 2001 From: R-penguins <38819653+R-penguins@users.noreply.github.com> Date: Thu, 7 May 2020 13:55:05 -0400 Subject: [PATCH 14/18] Update Image Watch Tutorial Updated the Windows Visual Studio Image Watch tutorial to include download links to the latest versions of Visual Studio Image Watch for newer Visual Studio versions. --- .../windows_visual_studio_image_watch.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.markdown b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.markdown index 6c957b3fd5..8bc2b32fff 100644 --- a/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.markdown +++ b/doc/tutorials/introduction/windows_visual_studio_image_watch/windows_visual_studio_image_watch.markdown @@ -18,8 +18,8 @@ This tutorial assumes that you have the following available: Installation ------------ -[Download](http://go.microsoft.com/fwlink/?LinkId=285460) the Image Watch installer. The installer -comes in a single file with extension .vsix (*Visual Studio Extension*). To launch it, simply +Download the Image Watch installer. ([Visual Studio 2019](https://marketplace.visualstudio.com/items?itemName=VisualCPPTeam.ImageWatch2019) | [Visual Studio 2017](https://marketplace.visualstudio.com/items?itemName=VisualCPPTeam.ImageWatch2017) | [Visual Studio 2012, 2013, 2015](https://marketplace.visualstudio.com/items?itemName=VisualCPPTeam.ImageWatch)) +The installer comes in a single file with extension .vsix (*Visual Studio Extension*). To launch it, simply double-click on the .vsix file in Windows Explorer. When the installer has finished, make sure to restart Visual Studio to complete the installation. 
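The Image Watch tutorial above is easiest to follow with a small program to debug. A minimal candidate is sketched below; the image path is a placeholder, and opening the extension via View > Other Windows > Image Watch is standard usage rather than something this patch changes.

```cpp
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat input = cv::imread("input.jpg");    // placeholder path; any readable image will do
    if (input.empty())
        return 1;

    cv::Mat gray, edges;
    cv::cvtColor(input, gray, cv::COLOR_BGR2GRAY);
    cv::Canny(gray, edges, 100, 200);            // set a breakpoint here
    return 0;
}
```

With the breakpoint hit, every in-scope `cv::Mat` (`input`, `gray`, `edges`) should appear as a viewable bitmap in the Image Watch window.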
From e9058ea8f596ea02ad66e389cb557856a03364b0 Mon Sep 17 00:00:00 2001
From: Rui Hou
Date: Sun, 10 May 2020 11:20:27 -0400
Subject: [PATCH 15/18] Easier access to opencv.js in tutorial

---
 doc/js_tutorials/js_setup/js_setup/js_setup.markdown | 2 ++
 doc/js_tutorials/js_setup/js_usage/js_usage.markdown | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
index 7d234acc9d..87167cd219 100644
--- a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
+++ b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown
@@ -1,6 +1,8 @@
 Build OpenCV.js {#tutorial_js_setup}
 ===============================
+@note
+You don't have to build your own copy if you simply want to start using it. Refer to the Using OpenCV.js tutorial for steps on getting a prebuilt copy from our releases or online documentation.
 
 Installing Emscripten
 -----------------------------
diff --git a/doc/js_tutorials/js_setup/js_usage/js_usage.markdown b/doc/js_tutorials/js_setup/js_usage/js_usage.markdown
index 88aba1afd5..5f9f338f2d 100644
--- a/doc/js_tutorials/js_setup/js_usage/js_usage.markdown
+++ b/doc/js_tutorials/js_setup/js_usage/js_usage.markdown
@@ -4,7 +4,7 @@ Using OpenCV.js {#tutorial_js_usage}
 Steps
 -----
 
-In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page.
+In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page. You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentation at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (for example, [https://docs.opencv.org/3.4.0/opencv.js](https://docs.opencv.org/3.4.0/opencv.js); use `master` if you want the latest build). You can also build your own copy by following the tutorial on Build OpenCV.js.
 
 ### Create a web page
 
@@ -44,7 +44,7 @@ To run this web page, copy the content above and save to a local index.html file
 
 Set the URL of `opencv.js` to `src` attribute of \<script\> tag.
 
-@note For this tutorial, we host `opencv.js` at same folder as index.html.
+@note For this tutorial, we host `opencv.js` in the same folder as index.html. You can also choose to use the URL of the prebuilt `opencv.js` in our online documentation.
Example for synchronous loading: @code{.js} From 48e9e651a478d935e3eb923dcd2e3ae503f72269 Mon Sep 17 00:00:00 2001 From: jsxyhelu Date: Sun, 10 May 2020 16:30:24 +0800 Subject: [PATCH 16/18] add DeepGreen colormap --- .../imgproc/doc/pics/colorscale_deepgreen.jpg | Bin 0 -> 1396 bytes modules/imgproc/include/opencv2/imgproc.hpp | 3 ++- modules/imgproc/src/colormap.cpp | 23 ++++++++++++++++++ 3 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 modules/imgproc/doc/pics/colorscale_deepgreen.jpg diff --git a/modules/imgproc/doc/pics/colorscale_deepgreen.jpg b/modules/imgproc/doc/pics/colorscale_deepgreen.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a67579c75f5173b23e7c98d5fe02159b440a5ad GIT binary patch literal 1396 zcmex=iF;N$`UAd82aiwDF383NJD#LCRf%Eivc4pu@E@&5pWAP0jSBLg#|5(ASU zBeNjm|04|YKzFi&odL?6mQqXwbzED#l4gO`Kd};u4Zls%q*Qnp!5NX66=_R?aT2 zZtfnQUcn)uVc`*xQOPN(Y3Ui6S;Zx#W#tu>Rn0A}ZS5VMU6UqHnL2IyjG40*Enc#8 z+42=DS8dw7W$U)>J9h3mboj{8W5-XNJay^vm8;jT-?(|};iJb-o<4j2;^nK4pFV&2 z`tAFVpT9u<0{IgLu=-07=r1Nv9I%7@#mH0+#LR*$tcr$gLXLs#iG{*SMvWXIP7@by zJjkhR9P~jnspuk?n2O0m)sG;rfqh1t$C}9U8QfzCf8Ao>VP*tI9{)JO3lw>+*_UVY%}^T${Q)@XLC&ZO1#~(=ur4mKSzgm#6VT;-1&LNVvq>3 z=&i%^t-H*0?Ax~K0BuRWQO}zb0d&!~56{mfZ8yuQuFXz5zWv%pps`@a-aYot&kfzq zPR)NY|26xSFjgode5?0E8_oX=qO!{xFV;2f_GMo5m$k5p-SrpOTNA$SFQPub)@)vQ z>xJ#=Z5OQ8UkMjp@NL~~hgbUpBR0P`x_j|fhW+8#bs(KP*0TS|-g?1G|3a9N-EX~{ zvg?{}?T%%CuUiqjPuRy7ut8`GA!U{W?rbx z2F25|#z z#2w#$X%i^Qvu}L+6|QUF^Wme~+4c|DjsmeQgucGfxfmFBAQ5EIt?hQ@A~_MzfIq(B zpVW;Fz=(hQ@cf*kc{xDSt{v^q2gVGPQC(5jelDqZw$}ZN_Sg7Vtl@?-)@MyWR1y47 z%NiW?WwSs*|L6-SSZ-y=ioejB`86`#dFzGh@Z1HZ@hjG;JHCzG)_ApU(Z=+9sk;|% zxlrG>I~E*iQLvC*lT!OFc9V51`&M9R--~**MegdY7xtCeVzu8QF94(B;kxS%FZMge Xgf;&$z4ZcUhqhYnv*=y+|8D{SyP9aC literal 0 HcmV?d00001 diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 97dc794fee..21bb012dcc 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -4376,7 +4376,8 @@ enum ColormapTypes COLORMAP_CIVIDIS = 17, //!< ![cividis](pics/colormaps/colorscale_cividis.jpg) COLORMAP_TWILIGHT = 18, //!< ![twilight](pics/colormaps/colorscale_twilight.jpg) COLORMAP_TWILIGHT_SHIFTED = 19, //!< ![twilight shifted](pics/colormaps/colorscale_twilight_shifted.jpg) - COLORMAP_TURBO = 20 //!< ![turbo](pics/colormaps/colorscale_turbo.jpg) + COLORMAP_TURBO = 20, //!< ![turbo](pics/colormaps/colorscale_turbo.jpg) + COLORMAP_DEEPGREEN = 21 //!< ![deepgreen](pics/colormaps/colorscale_deepgreen.jpg) }; /** @example samples/cpp/falsecolor.cpp diff --git a/modules/imgproc/src/colormap.cpp b/modules/imgproc/src/colormap.cpp index c56883436e..26371edad6 100644 --- a/modules/imgproc/src/colormap.cpp +++ b/modules/imgproc/src/colormap.cpp @@ -297,6 +297,28 @@ namespace colormap } }; + // Equals the colormap "deepgreen". 
+ class DeepGreen : public ColorMap { + public: + DeepGreen() : ColorMap() { + init(256); + } + DeepGreen(int n) : ColorMap() { + init(n); + } + void init(int n) { + static const float r[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04761904761904762f, 0.09523809523809523f, 0.1428571428571428f, 0.1904761904761905f, 0.2380952380952381f, 0.2857142857142857f, 0.3333333333333333f, 0.3809523809523809f, 0.4285714285714285f, 0.4761904761904762f, 0.5238095238095238f, 0.5714285714285714f, 0.6190476190476191f, 0.6666666666666666f, 0.7142857142857143f, 0.7619047619047619f, 0.8095238095238095f, 0.8571428571428571f, 0.9047619047619048f, 0.9523809523809523f, 1 }; + static const float g[] = { 0, 0.01587301587301587f, 0.03174603174603174f, 0.04761904761904762f, 0.06349206349206349f, 0.07936507936507936f, 0.09523809523809523f, 0.1111111111111111f, 0.126984126984127f, 0.1428571428571428f, 0.1587301587301587f, 0.1746031746031746f, 0.1904761904761905f, 0.2063492063492063f, 0.2222222222222222f, 0.2380952380952381f, 0.253968253968254f, 0.2698412698412698f, 0.2857142857142857f, 0.3015873015873016f, 0.3174603174603174f, 0.3333333333333333f, 0.3492063492063492f, 0.3650793650793651f, 0.3809523809523809f, 0.3968253968253968f, 0.4126984126984127f, 0.4285714285714285f, 0.4444444444444444f, 0.4603174603174603f, 0.4761904761904762f, 0.492063492063492f, 0.5079365079365079f, 0.5238095238095238f, 0.5396825396825397f, 0.5555555555555556f, 0.5714285714285714f, 0.5873015873015873f, 0.6031746031746031f, 0.6190476190476191f, 0.6349206349206349f, 0.6507936507936508f, 0.6666666666666666f, 0.6825396825396826f, 0.6984126984126984f, 0.7142857142857143f, 0.7301587301587301f, 0.746031746031746f, 0.7619047619047619f, 0.7777777777777778f, 0.7936507936507936f, 0.8095238095238095f, 0.8253968253968254f, 0.8412698412698413f, 0.8571428571428571f, 0.873015873015873f, 0.8888888888888888f, 0.9047619047619048f, 0.9206349206349206f, 0.9365079365079365f, 0.9523809523809523f, 0.9682539682539683f, 0.9841269841269841f, 1 }; + static const float b[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02380952380952381f, 0.04761904761904762f, 0.07142857142857142f, 0.09523809523809523f, 0.119047619047619f, 0.1428571428571428f, 0.1666666666666667f, 0.1904761904761905f, 0.2142857142857143f, 0.2380952380952381f, 0.2619047619047619f, 0.2857142857142857f, 0.3095238095238095f, 0.3333333333333333f, 0.3571428571428572f, 0.3809523809523809f, 0.4047619047619048f, 0.4285714285714285f, 0.4523809523809524f, 0.4761904761904762f, 0.5f, 0.5238095238095238f, 0.5476190476190477f, 0.5714285714285714f, 0.5952380952380952f, 0.6190476190476191f, 0.6428571428571429f, 0.6666666666666666f, 0.6904761904761905f, 0.7142857142857143f, 0.7380952380952381f, 0.7619047619047619f, 0.7857142857142857f, 0.8095238095238095f, 0.8333333333333334f, 0.8571428571428571f, 0.8809523809523809f, 0.9047619047619048f, 0.9285714285714286f, 0.9523809523809523f, 0.9761904761904762f, 1 }; + Mat X = linspace(0, 1, 64); + this->_lut = ColorMap::linear_colormap(X, + Mat(64, 1, CV_32FC1, (void*)r).clone(), // red + Mat(64, 1, CV_32FC1, (void*)g).clone(), // green + Mat(64, 1, CV_32FC1, (void*)b).clone(), // blue + n); // number of sample points + } + }; + // Equals the GNU Octave colormap "ocean". class Ocean : public ColorMap { public: @@ -742,6 +764,7 @@ namespace colormap colormap == COLORMAP_BONE ? (colormap::ColorMap*)(new colormap::Bone) : colormap == COLORMAP_CIVIDIS ? 
(colormap::ColorMap*)(new colormap::Cividis) : colormap == COLORMAP_COOL ? (colormap::ColorMap*)(new colormap::Cool) : + colormap == COLORMAP_DEEPGREEN ? (colormap::ColorMap*)(new colormap::DeepGreen) : colormap == COLORMAP_HOT ? (colormap::ColorMap*)(new colormap::Hot) : colormap == COLORMAP_HSV ? (colormap::ColorMap*)(new colormap::HSV) : colormap == COLORMAP_INFERNO ? (colormap::ColorMap*)(new colormap::Inferno) : From 79f8b7fd73e547880658904bd81a5a0eb3c5fcbe Mon Sep 17 00:00:00 2001 From: Liubov Batanina Date: Tue, 12 May 2020 15:33:57 +0300 Subject: [PATCH 17/18] Merge pull request #17233 from l-bat:onnx_bn * Added ONNX BatchNorm subgraph * Move removing constant inputs to addConstantNodesForInitializers * Added initializers to ONNXGraphWrapper --- .../dnn/src/onnx/onnx_graph_simplifier.cpp | 84 +++++++++++++------ modules/dnn/src/onnx/onnx_importer.cpp | 19 ----- modules/dnn/test/test_onnx_importer.cpp | 7 ++ 3 files changed, 66 insertions(+), 44 deletions(-) diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp index b5bb92e92a..61ef8b7da6 100644 --- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp +++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp @@ -61,27 +61,28 @@ public: ONNXGraphWrapper(opencv_onnx::GraphProto& _net) : net(_net) { numInputs = net.input_size(); + numInitializers = net.initializer_size(); } virtual Ptr getNode(int idx) const CV_OVERRIDE { opencv_onnx::NodeProto* node = 0; - if (idx >= numInputs) - node = net.mutable_node(idx - numInputs); + if (idx >= numInputs + numInitializers) + node = net.mutable_node(idx - numInputs - numInitializers); return makePtr(node); } virtual int getNumNodes() const CV_OVERRIDE { - return numInputs + net.node_size(); + return numInputs + numInitializers + net.node_size(); } virtual int getNumOutputs(int nodeId) const CV_OVERRIDE { - if (nodeId < numInputs) + if (nodeId < numInputs + numInitializers) return 1; else - return net.node(nodeId - numInputs).output_size(); + return net.node(nodeId - numInputs - numInitializers).output_size(); } virtual std::string getOutputName(int nodeId, int outId) const CV_OVERRIDE @@ -89,18 +90,20 @@ public: CV_Assert(outId < getNumOutputs(nodeId)); if (nodeId < numInputs) return net.input(nodeId).name(); + else if (nodeId < numInputs + numInitializers) + return net.initializer(nodeId - numInputs).name(); else - return net.node(nodeId - numInputs).output(outId); + return net.node(nodeId - numInputs - numInitializers).output(outId); } virtual void removeNode(int idx) CV_OVERRIDE { - CV_Assert(idx >= numInputs); - net.mutable_node()->DeleteSubrange(idx - numInputs, 1); + CV_Assert(idx >= numInputs + numInitializers); + net.mutable_node()->DeleteSubrange(idx - numInputs - numInitializers, 1); } private: - int numInputs; + int numInputs, numInitializers; opencv_onnx::GraphProto& net; }; @@ -382,33 +385,63 @@ public: } }; -class BatchNormalizationSubgraph : public Subgraph +class BatchNormalizationSubgraphBase : public Subgraph { public: - BatchNormalizationSubgraph() + BatchNormalizationSubgraphBase() { - int input = addNodeToMatch(""); - int data1 = addNodeToMatch("Constant"); - int data2 = addNodeToMatch("Constant"); - int data3 = addNodeToMatch("Constant"); - int data4 = addNodeToMatch("Constant"); - int shape1 = addNodeToMatch("Constant"); - int reshape1 = addNodeToMatch("Reshape", data1, shape1); - int shape2 = addNodeToMatch("Constant"); - int reshape2 = addNodeToMatch("Reshape", data2, shape2); + input = addNodeToMatch(""); + var = 
addNodeToMatch(""); + mean = addNodeToMatch(""); + weight = addNodeToMatch(""); + bias = addNodeToMatch(""); + A = addNodeToMatch(""); + shape1 = addNodeToMatch(""); + shape2 = addNodeToMatch(""); + } +protected: + int input, var, mean, weight, bias, A, shape1, shape2; +}; + +class BatchNormalizationSubgraph1 : public BatchNormalizationSubgraphBase +{ +public: + BatchNormalizationSubgraph1() + { + int reshape1 = addNodeToMatch("Reshape", weight, shape1); + int reshape2 = addNodeToMatch("Reshape", bias, shape2); int shape3 = addNodeToMatch("Constant"); - int reshape3 = addNodeToMatch("Reshape", data3, shape3); + int reshape3 = addNodeToMatch("Reshape", var, shape3); int shape4 = addNodeToMatch("Constant"); - int reshape4 = addNodeToMatch("Reshape", data4, shape4); + int reshape4 = addNodeToMatch("Reshape", mean, shape4); int sqrtNode = addNodeToMatch("Sqrt", reshape3); - int A = addNodeToMatch("Constant"); int divNode = addNodeToMatch("Div", A, sqrtNode); int mul1 = addNodeToMatch("Mul", reshape1, divNode); int mul2 = addNodeToMatch("Mul", reshape4, mul1); int sub = addNodeToMatch("Sub", reshape2, mul2); int mul3 = addNodeToMatch("Mul", input, mul1); addNodeToMatch("Add", mul3, sub); - setFusedNode("BatchNormalization", input, data1, data2, data4 ,data3); + setFusedNode("BatchNormalization", input, weight, bias, mean, var); + } +}; + +class BatchNormalizationSubgraph2 : public BatchNormalizationSubgraphBase +{ +public: + BatchNormalizationSubgraph2() + { + int sqrtNode = addNodeToMatch("Sqrt", var); + int divNode = addNodeToMatch("Div", A, sqrtNode); + int mul1 = addNodeToMatch("Mul", weight, divNode); + int reshape2 = addNodeToMatch("Reshape", mul1, shape2); + + int mulMean = addNodeToMatch("Mul", mean, mul1); + int sub = addNodeToMatch("Sub", bias, mulMean); + int reshape1 = addNodeToMatch("Reshape", sub, shape1); + + int mulInput = addNodeToMatch("Mul", input, reshape2); + addNodeToMatch("Add", mulInput, reshape1); + setFusedNode("BatchNormalization", input, weight, bias, mean, var); } }; @@ -424,7 +457,8 @@ void simplifySubgraphs(opencv_onnx::GraphProto& net) subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); - subgraphs.push_back(makePtr()); + subgraphs.push_back(makePtr()); + subgraphs.push_back(makePtr()); simplifySubgraphs(Ptr(new ONNXGraphWrapper(net)), subgraphs); } diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index 92fc3845c3..2b0d846721 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -309,30 +309,11 @@ static void addConstant(const std::string& name, outShapes.insert(std::make_pair(name, shape(blob))); } -void addConstantNodesForInitializers(opencv_onnx::GraphProto& graph_proto) -{ - int num_initializers = graph_proto.initializer_size(); - for (int id = 0; id < num_initializers; id++) - { - opencv_onnx::TensorProto initializer = graph_proto.initializer(id); - opencv_onnx::NodeProto* constant_node = graph_proto.add_node(); - constant_node->set_op_type("Constant"); - constant_node->set_name(initializer.name()); - constant_node->add_output(initializer.name()); - opencv_onnx::AttributeProto* value = constant_node->add_attribute(); - opencv_onnx::TensorProto* tensor = initializer.New(); - tensor->CopyFrom(initializer); - releaseONNXTensor(initializer); - value->set_allocated_t(tensor); - } -} - void ONNXImporter::populateNet(Net dstNet) { CV_Assert(model_proto.has_graph()); opencv_onnx::GraphProto graph_proto = model_proto.graph(); - 
addConstantNodesForInitializers(graph_proto); simplifySubgraphs(graph_proto); std::map constBlobs = getGraphTensors(graph_proto); diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index e0a4d4f665..cfffc9629a 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -306,6 +306,13 @@ TEST_P(Test_ONNX_layers, BatchNormalizationUnfused) testONNXModels("frozenBatchNorm2d"); } +TEST_P(Test_ONNX_layers, BatchNormalizationSubgraph) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + testONNXModels("batch_norm_subgraph"); +} + TEST_P(Test_ONNX_layers, Transpose) { if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) From f30931848eb7cbb526e9dac7cdf7d91070e1ec25 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Tue, 17 Dec 2019 15:55:45 +0300 Subject: [PATCH 18/18] CAP_MFX: surface pool with timeout, more parameters --- modules/videoio/src/cap_mfx_common.cpp | 43 +++++++++++++++++++++++--- modules/videoio/src/cap_mfx_common.hpp | 32 ++++++++++++++++--- modules/videoio/src/cap_mfx_reader.cpp | 2 +- modules/videoio/src/cap_mfx_writer.cpp | 18 +++++++++-- 4 files changed, 82 insertions(+), 13 deletions(-) diff --git a/modules/videoio/src/cap_mfx_common.cpp b/modules/videoio/src/cap_mfx_common.cpp index 55c2450b80..268d8c6055 100644 --- a/modules/videoio/src/cap_mfx_common.cpp +++ b/modules/videoio/src/cap_mfx_common.cpp @@ -14,10 +14,30 @@ using namespace std; using namespace cv; +static mfxIMPL getImpl() +{ + static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_IMPL", MFX_IMPL_AUTO_ANY); + return (mfxIMPL)res; +} + +static size_t getExtraSurfaceNum() +{ + static const size_t res = cv::utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_EXTRA_SURFACE_NUM", 1); + return res; +} + +static size_t getPoolTimeoutSec() +{ + static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_POOL_TIMEOUT", 1); + return res; +} + +//================================================================================================== + bool DeviceHandler::init(MFXVideoSession &session) { mfxStatus res = MFX_ERR_NONE; - mfxIMPL impl = MFX_IMPL_AUTO_ANY; + mfxIMPL impl = getImpl(); mfxVersion ver = { {19, 1} }; res = session.Init(impl, &ver); @@ -114,11 +134,26 @@ SurfacePool::~SurfacePool() { } +SurfacePool * SurfacePool::_create(const mfxFrameAllocRequest &request, const mfxVideoParam ¶ms) +{ + return new SurfacePool(request.Info.Width, + request.Info.Height, + saturate_cast((size_t)request.NumFrameSuggested + getExtraSurfaceNum()), + params.mfx.FrameInfo); +} + mfxFrameSurface1 *SurfacePool::getFreeSurface() { - for(std::vector::iterator i = surfaces.begin(); i != surfaces.end(); ++i) - if (!i->Data.Locked) - return &(*i); + const int64 start = cv::getTickCount(); + do + { + for(std::vector::iterator i = surfaces.begin(); i != surfaces.end(); ++i) + if (!i->Data.Locked) + return &(*i); + sleep_ms(10); + } + while((cv::getTickCount() - start) / cv::getTickFrequency() < getPoolTimeoutSec()); // seconds + DBG(cout << "No free surface!" 
<< std::endl); return 0; } diff --git a/modules/videoio/src/cap_mfx_common.hpp b/modules/videoio/src/cap_mfx_common.hpp index b4d2d9a2b6..bef6d34ae9 100644 --- a/modules/videoio/src/cap_mfx_common.hpp +++ b/modules/videoio/src/cap_mfx_common.hpp @@ -6,6 +6,7 @@ #define MFXHELPER_H #include "opencv2/core.hpp" +#include "opencv2/core/utils/configuration.private.hpp" #include #include @@ -259,11 +260,10 @@ public: DBG(std::cout << "MFX QueryIOSurf: " << res << std::endl); if (res < MFX_ERR_NONE) return 0; - return new SurfacePool(request.Info.Width, - request.Info.Height, - request.NumFrameSuggested, - params.mfx.FrameInfo); + return _create(request, params); } +private: + static SurfacePool* _create(const mfxFrameAllocRequest& request, const mfxVideoParam& params); private: SurfacePool(const SurfacePool &); SurfacePool &operator=(const SurfacePool &); @@ -285,6 +285,29 @@ protected: }; +// TODO: move to core::util? +#ifdef CV_CXX11 +#include +static void sleep_ms(int64 ms) +{ + std::this_thread::sleep_for(std::chrono::milliseconds(ms)); +} +#elif defined(__linux__) +#include +static void sleep_ms(int64 ms) +{ + nanosleep(ms * 1000 * 1000); +} +#elif defined _WIN32 +static void sleep_ms(int64 ms) +{ + Sleep(ms); +} +#else +#error "Can not detect sleep_ms() implementation" +#endif + + // Linux specific #ifdef __linux__ @@ -310,7 +333,6 @@ private: #ifdef _WIN32 #include -inline void sleep(unsigned long sec) { Sleep(1000 * sec); } class DXHandle : public DeviceHandler { public: diff --git a/modules/videoio/src/cap_mfx_reader.cpp b/modules/videoio/src/cap_mfx_reader.cpp index f2ec1c42d4..7df2cf56af 100644 --- a/modules/videoio/src/cap_mfx_reader.cpp +++ b/modules/videoio/src/cap_mfx_reader.cpp @@ -214,7 +214,7 @@ bool VideoCapture_IntelMFX::grabFrame() else if (res == MFX_WRN_DEVICE_BUSY) { DBG(cout << "Waiting for device" << endl); - sleep(1); + sleep_ms(1000); continue; } else if (res == MFX_WRN_VIDEO_PARAM_CHANGED) diff --git a/modules/videoio/src/cap_mfx_writer.cpp b/modules/videoio/src/cap_mfx_writer.cpp index 8a93fc7e53..1279141a8d 100644 --- a/modules/videoio/src/cap_mfx_writer.cpp +++ b/modules/videoio/src/cap_mfx_writer.cpp @@ -10,6 +10,18 @@ using namespace std; using namespace cv; +static size_t getBitrateDivisor() +{ + static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR", 300); + return res; +} + +static mfxU32 getWriterTimeoutMS() +{ + static const size_t res = utils::getConfigurationParameterSizeT("OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT", 1); + return saturate_cast(res * 1000); // convert from seconds +} + inline mfxU32 codecIdByFourCC(int fourcc) { const int CC_MPG2 = FourCC('M', 'P', 'G', '2').vali32; @@ -77,7 +89,7 @@ VideoWriter_IntelMFX::VideoWriter_IntelMFX(const String &filename, int _fourcc, memset(¶ms, 0, sizeof(params)); params.mfx.CodecId = codecId; params.mfx.TargetUsage = MFX_TARGETUSAGE_BALANCED; - params.mfx.TargetKbps = (mfxU16)cvRound(frameSize.area() * fps / 500); // TODO: set in options + params.mfx.TargetKbps = saturate_cast((frameSize.area() * fps) / (42.6666 * getBitrateDivisor())); // TODO: set in options params.mfx.RateControlMethod = MFX_RATECONTROL_VBR; params.mfx.FrameInfo.FrameRateExtN = cvRound(fps * 1000); params.mfx.FrameInfo.FrameRateExtD = 1000; @@ -210,7 +222,7 @@ bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr) res = encoder->EncodeFrameAsync(NULL, workSurface, &bs->stream, &sync); if (res == MFX_ERR_NONE) { - res = session->SyncOperation(sync, 1000); // 1 sec, TODO: provide interface to modify 
timeout + res = session->SyncOperation(sync, getWriterTimeoutMS()); // TODO: provide interface to modify timeout if (res == MFX_ERR_NONE) { // ready to write @@ -239,7 +251,7 @@ bool VideoWriter_IntelMFX::write_one(cv::InputArray bgr) else if (res == MFX_WRN_DEVICE_BUSY) { DBG(cout << "Waiting for device" << endl); - sleep(1); + sleep_ms(1000); continue; } else
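The CAP_MFX patch above introduces several environment variables (OPENCV_VIDEOIO_MFX_IMPL, OPENCV_VIDEOIO_MFX_EXTRA_SURFACE_NUM, OPENCV_VIDEOIO_MFX_POOL_TIMEOUT, OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR, OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT) but does not show how a user would set them. One plausible way, sketched below with example values only, is to put them in the process environment before the first VideoWriter is created, since each variable is read once and cached.

```cpp
#include <cstdlib>
#include <opencv2/videoio.hpp>

int main()
{
    // Example values only; the variable names come from the patch above.
#ifdef _WIN32
    _putenv("OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT=5");      // seconds
    _putenv("OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR=300");   // default used by the patch
#else
    setenv("OPENCV_VIDEOIO_MFX_WRITER_TIMEOUT", "5", 1);
    setenv("OPENCV_VIDEOIO_MFX_BITRATE_DIVISOR", "300", 1);
#endif

    cv::VideoWriter writer("out.mpg", cv::CAP_INTEL_MFX,
                           cv::VideoWriter::fourcc('M', 'P', 'G', '2'),
                           30.0, cv::Size(1920, 1080), true);
    return writer.isOpened() ? 0 : 1;
}
```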