diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 4fad59dcc6..cf19934116 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -11,8 +11,44 @@ if(BUILD_DOCS AND HAVE_SPHINX)
 
   project(opencv_docs)
 
-  file(GLOB_RECURSE OPENCV_FILES_REF ../modules/*.rst)
-  file(GLOB_RECURSE OPENCV_FILES_REF_PICT ../modules/*.png ../modules/*.jpg)
+  set(OPENCV2_BASE_MODULES core imgproc highgui video calib3d features2d objdetect ml flann gpu photo stitching nonfree contrib legacy)
+
+  # build lists of modules to be documented
+  set(OPENCV2_MODULES "")
+  set(OPENCV_MODULES "")
+
+  foreach(mod ${OPENCV_MODULES_BUILD} ${OPENCV_MODULES_DISABLED_USER} ${OPENCV_MODULES_DISABLED_AUTO} ${OPENCV_MODULES_DISABLED_FORCE})
+    string(REGEX REPLACE "^opencv_" "" mod "${mod}")
+    if("${OPENCV_MODULE_opencv_${mod}_LOCATION}" STREQUAL "${OpenCV_SOURCE_DIR}/modules/${mod}")
+      list(APPEND OPENCV2_MODULES ${mod})
+    else()
+      list(APPEND OPENCV_MODULES ${mod})
+    endif()
+  endforeach()
+  list(REMOVE_ITEM OPENCV2_MODULES ${OPENCV2_BASE_MODULES})
+  ocv_list_sort(OPENCV2_MODULES)
+  ocv_list_sort(OPENCV_MODULES)
+
+  # build lists of documentation files and generate table of contents for reference manual
+  set(OPENCV_FILES_REF "")
+  set(OPENCV_FILES_REF_PICT "")
+  set(OPENCV_REFMAN_TOC "")
+
+  foreach(mod ${OPENCV2_BASE_MODULES} ${OPENCV2_MODULES} ${OPENCV_MODULES})
+    file(GLOB_RECURSE _OPENCV_FILES_REF "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/*.rst")
+    file(GLOB_RECURSE _OPENCV_FILES_REF_PICT "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/*.png" "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/*.jpg")
+    list(APPEND OPENCV_FILES_REF ${_OPENCV_FILES_REF})
+    list(APPEND OPENCV_FILES_REF_PICT ${_OPENCV_FILES_REF_PICT})
+
+    set(toc_file "${OPENCV_MODULE_opencv_${mod}_LOCATION}/doc/${mod}.rst")
+    if(EXISTS "${toc_file}")
+      file(RELATIVE_PATH toc_file "${OpenCV_SOURCE_DIR}/modules" "${toc_file}")
+      set(OPENCV_REFMAN_TOC "${OPENCV_REFMAN_TOC} ${toc_file}\r\n")
+    endif()
+  endforeach()
+
+  configure_file("${OpenCV_SOURCE_DIR}/modules/refman.rst.in" "${OpenCV_SOURCE_DIR}/modules/refman.rst" IMMEDIATE @ONLY)
+
   file(GLOB_RECURSE OPENCV_FILES_UG user_guide/*.rst)
   file(GLOB_RECURSE OPENCV_FILES_TUT tutorials/*.rst)
   file(GLOB_RECURSE OPENCV_FILES_TUT_PICT tutorials/*.png tutorials/*.jpg)
@@ -54,6 +90,6 @@ if(BUILD_DOCS AND HAVE_SPHINX)
       set_target_properties(html_docs PROPERTIES FOLDER "documentation")
     endif()
   endif()
-  
+
 install(FILES ${FILES_DOC} DESTINATION "${OPENCV_DOC_INSTALL_PATH}" COMPONENT main)
 install(FILES ${FILES_DOC_VS} DESTINATION "${OPENCV_DOC_INSTALL_PATH}/vidsurv" COMPONENT main)
diff --git a/modules/features2d/src/fast.cpp b/modules/features2d/src/fast.cpp
index 2bc25dd359..9495d35785 100644
--- a/modules/features2d/src/fast.cpp
+++ b/modules/features2d/src/fast.cpp
@@ -16,8 +16,8 @@ are met:
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
 
-  *Neither the name of the University of Cambridge nor the names of 
-  its contributors may be used to endorse or promote products derived 
+  *Neither the name of the University of Cambridge nor the names of
+  its contributors may be used to endorse or promote products derived
   from this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@@ -35,7 +35,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* The references are: - * Machine learning for high-speed corner detection, + * Machine learning for high-speed corner detection, E. Rosten and T. Drummond, ECCV 2006 * Faster and better: A machine learning approach to corner detection E. Rosten, R. Porter and T. Drummond, PAMI, 2009 @@ -64,7 +64,7 @@ static void makeOffsets(int pixel[], int row_stride) pixel[13] = -3 + row_stride * 1; pixel[14] = -2 + row_stride * 2; pixel[15] = -1 + row_stride * 3; -} +} static int cornerScore(const uchar* ptr, const int pixel[], int threshold) { @@ -73,7 +73,7 @@ static int cornerScore(const uchar* ptr, const int pixel[], int threshold) short d[N]; for( k = 0; k < N; k++ ) d[k] = (short)(v - ptr[pixel[k]]); - + #if CV_SSE2 __m128i q0 = _mm_set1_epi16(-1000), q1 = _mm_set1_epi16(1000); for( k = 0; k < 16; k += 8 ) @@ -128,7 +128,7 @@ static int cornerScore(const uchar* ptr, const int pixel[], int threshold) a0 = std::max(a0, std::min(a, (int)d[k])); a0 = std::max(a0, std::min(a, (int)d[k+9])); } - + int b0 = -a0; for( k = 0; k < 16; k += 2 ) { @@ -141,14 +141,14 @@ static int cornerScore(const uchar* ptr, const int pixel[], int threshold) b = std::max(b, (int)d[k+6]); b = std::max(b, (int)d[k+7]); b = std::max(b, (int)d[k+8]); - + b0 = std::min(b0, std::max(b, (int)d[k])); b0 = std::min(b0, std::max(b, (int)d[k+9])); } - + threshold = -b0-1; #endif - + #if 0 // check that with the computed "threshold" the pixel is still a corner // and that with the increased-by-1 "threshold" the pixel is not a corner anymore @@ -157,7 +157,7 @@ static int cornerScore(const uchar* ptr, const int pixel[], int threshold) int v0 = std::min(ptr[0] + threshold + delta, 255); int v1 = std::max(ptr[0] - threshold - delta, 0); int c0 = 0, c1 = 0; - + for( int k = 0; k < N; k++ ) { int x = ptr[pixel[k]]; @@ -184,7 +184,7 @@ static int cornerScore(const uchar* ptr, const int pixel[], int threshold) #endif return threshold; } - + void FAST(InputArray _img, std::vector& keypoints, int threshold, bool nonmax_suppression) { @@ -214,7 +214,7 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool cpbuf[1] = cpbuf[0] + img.cols + 1; cpbuf[2] = cpbuf[1] + img.cols + 1; memset(buf[0], 0, img.cols*3); - + for(i = 3; i < img.rows-2; i++) { const uchar* ptr = img.ptr(i) + 3; @@ -222,7 +222,7 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool int* cornerpos = cpbuf[(i - 3)%3]; memset(curr, 0, img.cols); int ncorners = 0; - + if( i < img.rows - 3 ) { j = 3; @@ -233,7 +233,7 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool __m128i v0 = _mm_loadu_si128((const __m128i*)ptr); __m128i v1 = _mm_xor_si128(_mm_subs_epu8(v0, t), delta); v0 = _mm_xor_si128(_mm_adds_epu8(v0, t), delta); - + __m128i x0 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[0])), delta); __m128i x1 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[4])), delta); __m128i x2 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[8])), delta); @@ -256,24 +256,24 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool ptr -= 8; continue; } - + __m128i c0 = _mm_setzero_si128(), c1 = c0, max0 = c0, max1 = c0; for( k = 0; k < N; k++ ) { __m128i x = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(ptr + pixel[k])), delta); m0 = _mm_cmpgt_epi8(x, v0); m1 = _mm_cmpgt_epi8(v1, x); - + c0 = _mm_and_si128(_mm_sub_epi8(c0, m0), m0); c1 = _mm_and_si128(_mm_sub_epi8(c1, m1), m1); - + max0 = _mm_max_epu8(max0, c0); max1 = _mm_max_epu8(max1, c1); } - + max0 = _mm_max_epu8(max0, max1); int m 
= _mm_movemask_epi8(_mm_cmpgt_epi8(max0, K16)); - + for( k = 0; m > 0 && k < 16; k++, m >>= 1 ) if(m & 1) { @@ -288,26 +288,26 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool int v = ptr[0]; const uchar* tab = &threshold_tab[0] - v + 255; int d = tab[ptr[pixel[0]]] | tab[ptr[pixel[8]]]; - + if( d == 0 ) continue; - + d &= tab[ptr[pixel[2]]] | tab[ptr[pixel[10]]]; d &= tab[ptr[pixel[4]]] | tab[ptr[pixel[12]]]; d &= tab[ptr[pixel[6]]] | tab[ptr[pixel[14]]]; - + if( d == 0 ) continue; - + d &= tab[ptr[pixel[1]]] | tab[ptr[pixel[9]]]; d &= tab[ptr[pixel[3]]] | tab[ptr[pixel[11]]]; d &= tab[ptr[pixel[5]]] | tab[ptr[pixel[13]]]; d &= tab[ptr[pixel[7]]] | tab[ptr[pixel[15]]]; - + if( d & 1 ) { int vt = v - threshold, count = 0; - + for( k = 0; k < N; k++ ) { int x = ptr[pixel[k]]; @@ -325,11 +325,11 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool count = 0; } } - + if( d & 2 ) { int vt = v + threshold, count = 0; - + for( k = 0; k < N; k++ ) { int x = ptr[pixel[k]]; @@ -349,17 +349,17 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool } } } - + cornerpos[-1] = ncorners; - + if( i == 3 ) continue; - + const uchar* prev = buf[(i - 4 + 3)%3]; const uchar* pprev = buf[(i - 5 + 3)%3]; cornerpos = cpbuf[(i - 4 + 3)%3]; ncorners = cornerpos[-1]; - + for( k = 0; k < ncorners; k++ ) { j = cornerpos[k]; @@ -375,7 +375,7 @@ void FAST(InputArray _img, std::vector& keypoints, int threshold, bool } } - + /* * FastFeatureDetector */ diff --git a/modules/features2d/src/orb.cpp b/modules/features2d/src/orb.cpp index ed838d314f..32b4849c06 100644 --- a/modules/features2d/src/orb.cpp +++ b/modules/features2d/src/orb.cpp @@ -53,31 +53,31 @@ static void HarrisResponses(const Mat& img, vector& pts, int blockSize, float harris_k) { CV_Assert( img.type() == CV_8UC1 && blockSize*blockSize <= 2048 ); - + size_t ptidx, ptsize = pts.size(); - + const uchar* ptr00 = img.ptr(); int step = (int)(img.step/img.elemSize1()); int r = blockSize/2; - + float scale = (1 << 2) * blockSize * 255.0f; scale = 1.0f / scale; float scale_sq_sq = scale * scale * scale * scale; - + AutoBuffer ofsbuf(blockSize*blockSize); int* ofs = ofsbuf; for( int i = 0; i < blockSize; i++ ) for( int j = 0; j < blockSize; j++ ) ofs[i*blockSize + j] = (int)(i*step + j); - + for( ptidx = 0; ptidx < ptsize; ptidx++ ) { int x0 = cvRound(pts[ptidx].pt.x - r); int y0 = cvRound(pts[ptidx].pt.y - r); - + const uchar* ptr0 = ptr00 + y0*step + x0; int a = 0, b = 0, c = 0; - + for( int k = 0; k < blockSize*blockSize; k++ ) { const uchar* ptr = ptr0 + ofs[k]; @@ -98,13 +98,13 @@ static float IC_Angle(const Mat& image, const int half_k, Point2f pt, const vector & u_max) { int m_01 = 0, m_10 = 0; - + const uchar* center = &image.at (cvRound(pt.y), cvRound(pt.x)); - + // Treat the center line differently, v=0 for (int u = -half_k; u <= half_k; ++u) m_10 += u * center[u]; - + // Go line by line in the circular patch int step = (int)image.step1(); for (int v = 1; v <= half_k; ++v) @@ -120,7 +120,7 @@ static float IC_Angle(const Mat& image, const int half_k, Point2f pt, } m_01 += v * v_sum; } - + return fastAtan2((float)m_01, (float)m_10); } @@ -134,10 +134,10 @@ static void computeOrbDescriptor(const KeyPoint& kpt, //angle = cvFloor(angle/12)*12.f; angle *= (float)(CV_PI/180.f); float a = (float)cos(angle), b = (float)sin(angle); - + const uchar* center = &img.at(cvRound(kpt.pt.y), cvRound(kpt.pt.x)); int step = (int)img.step; - + #if 1 #define GET_VALUE(idx) \ center[cvRound(pattern[idx].x*b + 
pattern[idx].y*a)*step + \ @@ -153,7 +153,7 @@ static void computeOrbDescriptor(const KeyPoint& kpt, cvRound(center[iy*step + ix]*(1-x)*(1-y) + center[(iy+1)*step + ix]*(1-x)*y + \ center[iy*step + ix+1]*x*(1-y) + center[(iy+1)*step + ix+1]*x*y)) #endif - + if( WTA_K == 2 ) { for (int i = 0; i < dsize; ++i, pattern += 16) @@ -175,7 +175,7 @@ static void computeOrbDescriptor(const KeyPoint& kpt, val |= (t0 < t1) << 6; t0 = GET_VALUE(14); t1 = GET_VALUE(15); val |= (t0 < t1) << 7; - + desc[i] = (uchar)val; } } @@ -186,16 +186,16 @@ static void computeOrbDescriptor(const KeyPoint& kpt, int t0, t1, t2, val; t0 = GET_VALUE(0); t1 = GET_VALUE(1); t2 = GET_VALUE(2); val = t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0); - + t0 = GET_VALUE(3); t1 = GET_VALUE(4); t2 = GET_VALUE(5); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 2; - + t0 = GET_VALUE(6); t1 = GET_VALUE(7); t2 = GET_VALUE(8); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 4; - + t0 = GET_VALUE(9); t1 = GET_VALUE(10); t2 = GET_VALUE(11); val |= (t2 > t1 ? (t2 > t0 ? 2 : 0) : (t1 > t0)) << 6; - + desc[i] = (uchar)val; } } @@ -211,7 +211,7 @@ static void computeOrbDescriptor(const KeyPoint& kpt, if( t3 > t2 ) t2 = t3, v = 3; k = t0 > t2 ? u : v; val = k; - + t0 = GET_VALUE(4); t1 = GET_VALUE(5); t2 = GET_VALUE(6); t3 = GET_VALUE(7); u = 0, v = 2; @@ -219,7 +219,7 @@ static void computeOrbDescriptor(const KeyPoint& kpt, if( t3 > t2 ) t2 = t3, v = 3; k = t0 > t2 ? u : v; val |= k << 2; - + t0 = GET_VALUE(8); t1 = GET_VALUE(9); t2 = GET_VALUE(10); t3 = GET_VALUE(11); u = 0, v = 2; @@ -227,7 +227,7 @@ static void computeOrbDescriptor(const KeyPoint& kpt, if( t3 > t2 ) t2 = t3, v = 3; k = t0 > t2 ? u : v; val |= k << 4; - + t0 = GET_VALUE(12); t1 = GET_VALUE(13); t2 = GET_VALUE(14); t3 = GET_VALUE(15); u = 0, v = 2; @@ -235,23 +235,23 @@ static void computeOrbDescriptor(const KeyPoint& kpt, if( t3 > t2 ) t2 = t3, v = 3; k = t0 > t2 ? u : v; val |= k << 6; - + desc[i] = (uchar)val; } } else CV_Error( CV_StsBadSize, "Wrong WTA_K. It can be only 2, 3 or 4." 
); - + #undef GET_VALUE } - - + + static void initializeOrbPattern( const Point* pattern0, vector& pattern, int ntuples, int tupleSize, int poolSize ) { RNG rng(0x12345678); int i, k, k1; pattern.resize(ntuples*tupleSize); - + for( i = 0; i < ntuples; i++ ) { for( k = 0; k < tupleSize; k++ ) @@ -545,7 +545,7 @@ static void makeRandomPattern(int patchSize, Point* pattern, int npoints) } } - + static inline float getScale(int level, int firstLevel, double scaleFactor) { return (float)std::pow(scaleFactor, (double)(level - firstLevel)); @@ -570,8 +570,8 @@ int ORB::descriptorSize() const int ORB::descriptorType() const { return CV_8U; -} - +} + /** Compute the ORB features and descriptors on an image * @param img the image to compute the features and descriptors on * @param mask the mask to apply @@ -599,7 +599,7 @@ static void computeOrientation(const Mat& image, vector& keypoints, keypoint->angle = IC_Angle(image, halfPatchSize, keypoint->pt, umax); } } - + /** Compute the ORB keypoints on an image * @param image_pyramid the image pyramid to compute the features and descriptors on @@ -614,11 +614,11 @@ static void computeKeyPoints(const vector& imagePyramid, { int nlevels = (int)imagePyramid.size(); vector nfeaturesPerLevel(nlevels); - + // fill the extractors and descriptors for the corresponding scales float factor = (float)(1.0 / scaleFactor); float ndesiredFeaturesPerScale = nfeatures*(1 - factor)/(1 - (float)pow((double)factor, (double)nlevels)); - + int sumFeatures = 0; for( int level = 0; level < nlevels-1; level++ ) { @@ -627,19 +627,19 @@ static void computeKeyPoints(const vector& imagePyramid, ndesiredFeaturesPerScale *= factor; } nfeaturesPerLevel[nlevels-1] = std::max(nfeatures - sumFeatures, 0); - + // Make sure we forget about what is too close to the boundary //edge_threshold_ = std::max(edge_threshold_, patch_size_/2 + kKernelWidth / 2 + 2); - + // pre-compute the end of a row in a circular patch int halfPatchSize = patchSize / 2; vector umax(halfPatchSize + 1); - + int v, v0, vmax = cvFloor(halfPatchSize * sqrt(2.f) / 2 + 1); int vmin = cvCeil(halfPatchSize * sqrt(2.f) / 2); for (v = 0; v <= vmax; ++v) umax[v] = cvRound(sqrt((double)halfPatchSize * halfPatchSize - v * v)); - + // Make sure we are symmetric for (v = halfPatchSize, v0 = 0; v >= vmin; --v) { @@ -648,37 +648,37 @@ static void computeKeyPoints(const vector& imagePyramid, umax[v] = v0; ++v0; } - + allKeypoints.resize(nlevels); - + for (int level = 0; level < nlevels; ++level) { int nfeatures = nfeaturesPerLevel[level]; allKeypoints[level].reserve(nfeatures*2); - + vector & keypoints = allKeypoints[level]; - + // Detect FAST features, 20 is a good threshold FastFeatureDetector fd(20, true); fd.detect(imagePyramid[level], keypoints, maskPyramid[level]); - + // Remove keypoints very close to the border KeyPointsFilter::runByImageBorder(keypoints, imagePyramid[level].size(), edgeThreshold); - + if( scoreType == ORB::HARRIS_SCORE ) { // Keep more points than necessary as FAST does not give amazing corners KeyPointsFilter::retainBest(keypoints, 2 * nfeatures); - + // Compute the Harris cornerness (better scoring than FAST) HarrisResponses(imagePyramid[level], keypoints, 7, HARRIS_K); } - + //cull to the final desired level, using the new Harris scores or the original FAST scores. 
- KeyPointsFilter::retainBest(keypoints, nfeatures); - + KeyPointsFilter::retainBest(keypoints, nfeatures); + float sf = getScale(level, firstLevel, scaleFactor); - + // Set the level of the coordinates for (vector::iterator keypoint = keypoints.begin(), keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint) @@ -686,12 +686,12 @@ static void computeKeyPoints(const vector& imagePyramid, keypoint->octave = level; keypoint->size = patchSize*sf; } - + computeOrientation(imagePyramid[level], keypoints, halfPatchSize, umax); } -} +} + - /** Compute the ORB decriptors * @param image the image to compute the features and descriptors on * @param integral_image the integral image of the image (can be empty, but the computation will be slower) @@ -706,12 +706,12 @@ static void computeDescriptors(const Mat& image, vector& keypoints, Ma CV_Assert(image.type() == CV_8UC1); //create the descriptor mat, keypoints.size() rows, BYTES cols descriptors = Mat::zeros((int)keypoints.size(), dsize, CV_8UC1); - + for (size_t i = 0; i < keypoints.size(); i++) computeOrbDescriptor(keypoints[i], image, &pattern[0], descriptors.ptr((int)i), dsize, WTA_K); } - - + + /** Compute the ORB features and descriptors on an image * @param img the image to compute the features and descriptors on * @param mask the mask to apply @@ -725,21 +725,21 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke { bool do_keypoints = !useProvidedKeypoints; bool do_descriptors = _descriptors.needed(); - + if( (!do_keypoints && !do_descriptors) || _image.empty() ) return; - + //ROI handling const int HARRIS_BLOCK_SIZE = 9; int halfPatchSize = patchSize / 2; int border = std::max(edgeThreshold, std::max(halfPatchSize, HARRIS_BLOCK_SIZE/2))+1; - + Mat image = _image.getMat(), mask = _mask.getMat(); if( image.type() != CV_8UC1 ) cvtColor(_image, image, CV_BGR2GRAY); - + int nlevels = this->nlevels; - + if( !do_keypoints ) { // if we have pre-computed keypoints, they may use more levels than it is set in parameters @@ -756,7 +756,7 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke nlevels = std::max(nlevels, std::max(_keypoints[i].octave, 0)); nlevels++; } - + // Pre-compute the scale pyramids vector imagePyramid(nlevels), maskPyramid(nlevels); for (int level = 0; level < nlevels; ++level) @@ -766,49 +766,48 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke Size wholeSize(sz.width + border*2, sz.height + border*2); Mat temp(wholeSize, image.type()), masktemp; imagePyramid[level] = temp(Rect(border, border, sz.width, sz.height)); - + if( !mask.empty() ) { masktemp = Mat(wholeSize, mask.type()); maskPyramid[level] = masktemp(Rect(border, border, sz.width, sz.height)); } - + // Compute the resized image if( level != firstLevel ) { if( level < firstLevel ) { - resize(image, imagePyramid[level], sz, scale, scale, INTER_LINEAR); + resize(image, imagePyramid[level], sz, 0, 0, INTER_LINEAR); if (!mask.empty()) - resize(mask, maskPyramid[level], sz, scale, scale, INTER_LINEAR); - copyMakeBorder(imagePyramid[level], temp, border, border, border, border, - BORDER_REFLECT_101+BORDER_ISOLATED); + resize(mask, maskPyramid[level], sz, 0, 0, INTER_LINEAR); } else { - resize(imagePyramid[level-1], imagePyramid[level], sz, - 1./scaleFactor, 1./scaleFactor, INTER_LINEAR); + resize(imagePyramid[level-1], imagePyramid[level], sz, 0, 0, INTER_LINEAR); if (!mask.empty()) - resize(maskPyramid[level-1], maskPyramid[level], sz, - 1./scaleFactor, 1./scaleFactor, INTER_LINEAR); - 
copyMakeBorder(imagePyramid[level], temp, border, border, border, border, - BORDER_REFLECT_101+BORDER_ISOLATED); + { + resize(maskPyramid[level-1], maskPyramid[level], sz, 0, 0, INTER_LINEAR); + threshold(maskPyramid[level], maskPyramid[level], 254, 0, THRESH_TOZERO); + } } + + copyMakeBorder(imagePyramid[level], temp, border, border, border, border, + BORDER_REFLECT_101+BORDER_ISOLATED); + if (!mask.empty()) + copyMakeBorder(maskPyramid[level], masktemp, border, border, border, border, + BORDER_CONSTANT+BORDER_ISOLATED); } else { copyMakeBorder(image, temp, border, border, border, border, BORDER_REFLECT_101); - image.copyTo(imagePyramid[level]); if( !mask.empty() ) - mask.copyTo(maskPyramid[level]); + copyMakeBorder(mask, masktemp, border, border, border, border, + BORDER_CONSTANT+BORDER_ISOLATED); } - - if( !mask.empty() ) - copyMakeBorder(maskPyramid[level], masktemp, border, border, border, border, - BORDER_CONSTANT+BORDER_ISOLATED); } - + // Pre-compute the keypoints (we keep the best over all scales, so this has to be done beforehand vector < vector > allKeypoints; if( do_keypoints ) @@ -817,19 +816,19 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke computeKeyPoints(imagePyramid, maskPyramid, allKeypoints, nfeatures, firstLevel, scaleFactor, edgeThreshold, patchSize, scoreType); - + // make sure we have the right number of keypoints keypoints /*vector temp; - + for (int level = 0; level < n_levels; ++level) { vector& keypoints = all_keypoints[level]; temp.insert(temp.end(), keypoints.begin(), keypoints.end()); keypoints.clear(); } - + KeyPoint::retainBest(temp, n_features_); - + for (vector::iterator keypoint = temp.begin(), keypoint_end = temp.end(); keypoint != keypoint_end; ++keypoint) all_keypoints[keypoint->octave].push_back(*keypoint);*/ @@ -838,19 +837,19 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke { // Remove keypoints very close to the border KeyPointsFilter::runByImageBorder(_keypoints, image.size(), edgeThreshold); - + // Cluster the input keypoints depending on the level they were computed at allKeypoints.resize(nlevels); for (vector::iterator keypoint = _keypoints.begin(), keypointEnd = _keypoints.end(); keypoint != keypointEnd; ++keypoint) allKeypoints[keypoint->octave].push_back(*keypoint); - + // Make sure we rescale the coordinates for (int level = 0; level < nlevels; ++level) { if (level == firstLevel) continue; - + vector & keypoints = allKeypoints[level]; float scale = 1/getScale(level, firstLevel, scaleFactor); for (vector::iterator keypoint = keypoints.begin(), @@ -858,10 +857,10 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke keypoint->pt *= scale; } } - + Mat descriptors; vector pattern; - + if( do_descriptors ) { int nkeypoints = 0; @@ -874,19 +873,19 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke _descriptors.create(nkeypoints, descriptorSize(), CV_8U); descriptors = _descriptors.getMat(); } - + const int npoints = 512; Point patternbuf[npoints]; const Point* pattern0 = (const Point*)bit_pattern_31_; - + if( patchSize != 31 ) { pattern0 = patternbuf; makeRandomPattern(patchSize, patternbuf, npoints); } - + CV_Assert( WTA_K == 2 || WTA_K == 3 || WTA_K == 4 ); - + if( WTA_K == 2 ) std::copy(pattern0, pattern0 + npoints, std::back_inserter(pattern)); else @@ -895,7 +894,7 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector& _ke initializeOrbPattern(pattern0, pattern, ntuples, WTA_K, npoints); } } - + _keypoints.clear(); int offset = 0; 
     for (int level = 0; level < nlevels; ++level)
@@ -903,15 +902,15 @@
         // Get the features and compute their orientation
         vector<KeyPoint>& keypoints = allKeypoints[level];
         int nkeypoints = (int)keypoints.size();
-        
+
         // Compute the descriptors
         if (do_descriptors)
         {
             Mat desc;
-            if (!descriptors.empty()) 
+            if (!descriptors.empty())
             {
                 desc = descriptors.rowRange(offset, offset + nkeypoints);
-            } 
+            }
             offset += nkeypoints;
             // preprocess the resized image
@@ -920,7 +919,7 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector<KeyPoint>& _ke
             GaussianBlur(workingMat, workingMat, Size(7, 7), 2, 2, BORDER_REFLECT_101);
             computeDescriptors(workingMat, keypoints, desc, pattern, descriptorSize(), WTA_K);
         }
-        
+
         // Copy to the output data
         if (level != firstLevel)
         {
@@ -933,11 +932,11 @@ void ORB::operator()( InputArray _image, InputArray _mask, vector<KeyPoint>& _ke
         _keypoints.insert(_keypoints.end(), keypoints.begin(), keypoints.end());
     }
 }
-    
+
 void ORB::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
 {
     (*this)(image, mask, keypoints, noArray(), false);
-}    
+}
 
 void ORB::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
 {
diff --git a/modules/features2d/test/test_features2d.cpp b/modules/features2d/test/test_features2d.cpp
index 713c0c4a98..272acca0be 100644
--- a/modules/features2d/test/test_features2d.cpp
+++ b/modules/features2d/test/test_features2d.cpp
@@ -1091,3 +1091,51 @@ TEST( Features2d_DescriptorMatcher_FlannBased, regression )
     CV_DescriptorMatcherTest test( "descriptor-matcher-flann-based", new FlannBasedMatcher, 0.04f );
     test.safe_run();
 }
+
+
+TEST(Features2D_ORB, _1996)
+{
+    cv::Ptr<FeatureDetector> fd = cv::FeatureDetector::create("ORB");
+    cv::Ptr<DescriptorExtractor> de = cv::DescriptorExtractor::create("ORB");
+
+    Mat image = cv::imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/lena.jpg");
+    ASSERT_FALSE(image.empty());
+
+    Mat roi(image.size(), CV_8UC1, Scalar(0));
+
+    Point poly[] = {Point(100, 20), Point(300, 50), Point(400, 200), Point(10, 500)};
+    fillConvexPoly(roi, poly, int(sizeof(poly) / sizeof(poly[0])), Scalar(255));
+
+    std::vector<KeyPoint> keypoints;
+    fd->detect(image, keypoints, roi);
+    cv::Mat descriptors;
+    de->compute(image, keypoints, descriptors);
+
+    //image.setTo(Scalar(255,255,255), roi);
+
+    int roiViolations = 0;
+    for(std::vector<KeyPoint>::const_iterator kp = keypoints.begin(); kp != keypoints.end(); ++kp)
+    {
+        int x = cvRound(kp->pt.x);
+        int y = cvRound(kp->pt.y);
+
+        ASSERT_LE(0, x);
+        ASSERT_LE(0, y);
+        ASSERT_GT(image.cols, x);
+        ASSERT_GT(image.rows, y);
+
+        // if (!roi.at<uchar>(y,x))
+        // {
+        //     roiViolations++;
+        //     circle(image, kp->pt, 3, Scalar(0,0,255));
+        // }
+    }
+
+    // if(roiViolations)
+    // {
+    //     imshow("img", image);
+    //     waitKey();
+    // }
+
+    ASSERT_EQ(0, roiViolations);
+}
\ No newline at end of file
diff --git a/modules/highgui/doc/user_interface.rst b/modules/highgui/doc/user_interface.rst
index 659d86eccc..7b39a193c1 100644
--- a/modules/highgui/doc/user_interface.rst
+++ b/modules/highgui/doc/user_interface.rst
@@ -10,6 +10,7 @@ Creates a trackbar and attaches it to the specified window.
 .. ocv:function:: int createTrackbar( const string& trackbarname, const string& winname, int* value, int count, TrackbarCallback onChange=0, void* userdata=0)
 
 .. ocv:cfunction:: int cvCreateTrackbar( const char* trackbar_name, const char* window_name, int* value, int count, CvTrackbarCallback on_change=NULL )
+
 .. ocv:pyoldfunction:: cv.CreateTrackbar(trackbarName, windowName, value, count, onChange) -> None
 
     :param trackbarname: Name of the created trackbar.
@@ -41,6 +42,7 @@ Returns the trackbar position.
 .. ocv:pyfunction:: cv2.getTrackbarPos(trackbarname, winname) -> retval
 
 .. ocv:cfunction:: int cvGetTrackbarPos( const char* trackbar_name, const char* window_name )
+
 .. ocv:pyoldfunction:: cv.GetTrackbarPos(trackbarName, windowName) -> retval
 
     :param trackbarname: Name of the trackbar.
@@ -62,6 +64,7 @@ Displays an image in the specified window.
 .. ocv:pyfunction:: cv2.imshow(winname, mat) -> None
 
 .. ocv:cfunction:: void cvShowImage( const char* name, const CvArr* image )
+
 .. ocv:pyoldfunction:: cv.ShowImage(name, image) -> None
 
     :param winname: Name of the window.
@@ -86,6 +89,7 @@ Creates a window.
 .. ocv:pyfunction:: cv2.namedWindow(winname[, flags]) -> None
 
 .. ocv:cfunction:: int cvNamedWindow( const char* name, int flags=CV_WINDOW_AUTOSIZE )
+
 .. ocv:pyoldfunction:: cv.NamedWindow(name, flags=CV_WINDOW_AUTOSIZE)-> None
 
     :param name: Name of the window in the window caption that may be used as a window identifier.
@@ -120,6 +124,7 @@ Destroys a window.
 .. ocv:pyfunction:: cv2.destroyWindow(winname) -> None
 
 .. ocv:cfunction:: void cvDestroyWindow( const char* name )
+
 .. ocv:pyoldfunction:: cv.DestroyWindow(name)-> None
 
     :param winname: Name of the window to be destroyed.
@@ -136,6 +141,7 @@ Destroys all of the HighGUI windows.
 .. ocv:pyfunction:: cv2.destroyAllWindows() -> None
 
 .. ocv:cfunction:: void cvDestroyAllWindows()
+
 .. ocv:pyoldfunction:: cv.DestroyAllWindows()-> None
 
 The function ``destroyAllWindows`` destroys all of the opened HighGUI windows.
@@ -145,10 +151,15 @@
 MoveWindow
 ----------
 Moves window to the specified position
 
+.. ocv:function:: void moveWindow( const string& winname, int x, int y )
+
+.. ocv:pyfunction:: cv2.moveWindow(winname, x, y) -> None
+
 .. ocv:cfunction:: void cvMoveWindow( const char* name, int x, int y )
+
 .. ocv:pyoldfunction:: cv.MoveWindow(name, x, y)-> None
 
-    :param name: Window name
+    :param winname: Window name
 
     :param x: The new x-coordinate of the window
@@ -159,10 +170,15 @@
 ResizeWindow
 ------------
 Resizes window to the specified size
 
+.. ocv:function:: void resizeWindow( const string& winname, int width, int height )
+
+.. ocv:pyfunction:: cv2.resizeWindow(winname, width, height) -> None
+
 .. ocv:cfunction:: void cvResizeWindow( const char* name, int width, int height )
+
 .. ocv:pyoldfunction:: cv.ResizeWindow(name, width, height)-> None
 
-    :param name: Window name
+    :param winname: Window name
 
     :param width: The new window width
 
    :param height: The new window height
@@ -179,14 +195,17 @@
 SetMouseCallback
 ----------------
 Sets mouse handler for the specified window
 
+.. ocv:function:: void setMouseCallback( const string& winname, MouseCallback onMouse, void* userdata=0 )
+
 .. ocv:cfunction:: void cvSetMouseCallback( const char* window_name, CvMouseCallback on_mouse, void* param=NULL )
+
 .. ocv:pyoldfunction:: cv.SetMouseCallback(windowName, onMouse, param=None) -> None
 
-    :param window_name: Window name
+    :param winname: Window name
 
-    :param on_mouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
+    :param onMouse: Mouse callback. See OpenCV samples, such as http://code.opencv.org/svn/opencv/trunk/opencv/samples/cpp/ffilldemo.cpp, on how to specify and use the callback.
 
-    :param param: The optional parameter passed to the callback. 
+ :param userdata: The optional parameter passed to the callback. setTrackbarPos @@ -198,6 +217,7 @@ Sets the trackbar position. .. ocv:pyfunction:: cv2.setTrackbarPos(trackbarname, winname, pos) -> None .. ocv:cfunction:: void cvSetTrackbarPos( const char* trackbar_name, const char* window_name, int pos ) + .. ocv:pyoldfunction:: cv.SetTrackbarPos(trackbarName, windowName, pos)-> None :param trackbarname: Name of the trackbar. @@ -221,6 +241,7 @@ Waits for a pressed key. .. ocv:pyfunction:: cv2.waitKey([delay]) -> retval .. ocv:cfunction:: int cvWaitKey( int delay=0 ) + .. ocv:pyoldfunction:: cv.WaitKey(delay=0)-> int :param delay: Delay in milliseconds. 0 is the special value that means "forever". diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index c4e20b5d26..f887837da4 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -3353,6 +3353,10 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) if( code == CV_BGR2HSV || code == CV_RGB2HSV || code == CV_BGR2HSV_FULL || code == CV_RGB2HSV_FULL ) { +#ifdef HAVE_TEGRA_OPTIMIZATION + if(tegra::cvtRGB2HSV(src, dst, bidx, hrange)) + break; +#endif if( depth == CV_8U ) CvtColorLoop(src, dst, RGB2HSV_b(scn, bidx, hrange)); else diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index 278f888c5a..0962af7a5b 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -47,7 +47,7 @@ // */ #include "precomp.hpp" - + namespace cv { @@ -349,7 +349,7 @@ struct VResizeLinearVec_32s8u { if( !checkHardwareSupport(CV_CPU_SSE2) ) return 0; - + const int** src = (const int**)_src; const short* beta = (const short*)_beta; const int *S0 = src[0], *S1 = src[1]; @@ -432,7 +432,7 @@ template struct VResizeLinearVec_32f16 { if( !checkHardwareSupport(CV_CPU_SSE2) ) return 0; - + const float** src = (const float**)_src; const float* beta = (const float*)_beta; const float *S0 = src[0], *S1 = src[1]; @@ -522,7 +522,7 @@ template struct VResizeLinearVec_32f16 }; typedef VResizeLinearVec_32f16 VResizeLinearVec_32f16u; -typedef VResizeLinearVec_32f16<0> VResizeLinearVec_32f16s; +typedef VResizeLinearVec_32f16<0> VResizeLinearVec_32f16s; struct VResizeLinearVec_32f { @@ -530,7 +530,7 @@ struct VResizeLinearVec_32f { if( !checkHardwareSupport(CV_CPU_SSE) ) return 0; - + const float** src = (const float**)_src; const float* beta = (const float*)_beta; const float *S0 = src[0], *S1 = src[1]; @@ -581,7 +581,7 @@ struct VResizeCubicVec_32s8u { if( !checkHardwareSupport(CV_CPU_SSE2) ) return 0; - + const int** src = (const int**)_src; const short* beta = (const short*)_beta; const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3]; @@ -676,7 +676,7 @@ template struct VResizeCubicVec_32f16 { if( !checkHardwareSupport(CV_CPU_SSE2) ) return 0; - + const float** src = (const float**)_src; const float* beta = (const float*)_beta; const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3]; @@ -729,7 +729,7 @@ template struct VResizeCubicVec_32f16 }; typedef VResizeCubicVec_32f16 VResizeCubicVec_32f16u; -typedef VResizeCubicVec_32f16<0> VResizeCubicVec_32f16s; +typedef VResizeCubicVec_32f16<0> VResizeCubicVec_32f16s; struct VResizeCubicVec_32f { @@ -737,7 +737,7 @@ struct VResizeCubicVec_32f { if( !checkHardwareSupport(CV_CPU_SSE) ) return 0; - + const float** src = (const float**)_src; const float* beta = (const float*)_beta; const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3]; @@ -795,17 +795,17 @@ typedef HResizeNoVec 
HResizeLinearVec_8u32s; typedef HResizeNoVec HResizeLinearVec_16u32f; typedef HResizeNoVec HResizeLinearVec_16s32f; typedef HResizeNoVec HResizeLinearVec_32f; - + typedef VResizeNoVec VResizeLinearVec_32s8u; typedef VResizeNoVec VResizeLinearVec_32f16u; typedef VResizeNoVec VResizeLinearVec_32f16s; typedef VResizeNoVec VResizeLinearVec_32f; - + typedef VResizeNoVec VResizeCubicVec_32s8u; typedef VResizeNoVec VResizeCubicVec_32f16u; typedef VResizeNoVec VResizeCubicVec_32f16s; typedef VResizeNoVec VResizeCubicVec_32f; - + #endif @@ -869,7 +869,7 @@ struct VResizeLinear typedef T value_type; typedef WT buf_type; typedef AT alpha_type; - + void operator()(const WT** src, T* dst, const AT* beta, int width ) const { WT b0 = beta[0], b1 = beta[1]; @@ -1035,7 +1035,7 @@ struct VResizeLanczos4 CastOp castOp; VecOp vecOp; int k, x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width); - #if CV_ENABLE_UNROLLED + #if CV_ENABLE_UNROLLED for( ; x <= width - 4; x += 4 ) { WT b = beta[0]; @@ -1144,7 +1144,7 @@ static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int int dy, dx, k = 0; int area = scale_x*scale_y; float scale = 1.f/(scale_x*scale_y); - int dwidth1 = (ssize.width/scale_x)*cn; + int dwidth1 = (ssize.width/scale_x)*cn; dsize.width *= cn; ssize.width *= cn; @@ -1158,7 +1158,7 @@ static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int D[dx] = 0; continue; } - + for( dx = 0; dx < w; dx++ ) { const T* S = (const T*)(src.data + src.step*sy0) + xofs[dx]; @@ -1173,14 +1173,14 @@ static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int D[dx] = saturate_cast(sum*scale); } - + for( ; dx < dsize.width; dx++ ) { WT sum = 0; int count = 0, sx0 = xofs[dx]; if( sx0 >= ssize.width ) D[dx] = 0; - + for( int sy = 0; sy < scale_y; sy++ ) { if( sy0 + sy >= ssize.height ) @@ -1194,7 +1194,7 @@ static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int count++; } } - + D[dx] = saturate_cast((float)sum/count); } } @@ -1318,7 +1318,7 @@ typedef void (*ResizeAreaFunc)( const Mat& src, Mat& dst, const DecimateAlpha* xofs, int xofs_count ); } - + ////////////////////////////////////////////////////////////////////////////////////////// void cv::resize( InputArray _src, OutputArray _dst, Size dsize, @@ -1428,7 +1428,7 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize, Mat src = _src.getMat(); Size ssize = src.size(); - + CV_Assert( ssize.area() > 0 ); CV_Assert( !(dsize == Size()) || (inv_scale_x > 0 && inv_scale_y > 0) ); if( dsize == Size() ) @@ -2310,7 +2310,7 @@ static void remapLanczos4( const Mat& _src, Mat& _dst, const Mat& _xy, int dx, dy; CastOp castOp; int borderType1 = borderType != BORDER_TRANSPARENT ? 
borderType : BORDER_REFLECT_101; - + unsigned width1 = std::max(ssize.width-7, 0), height1 = std::max(ssize.height-7, 0); if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() ) @@ -2410,7 +2410,7 @@ typedef void (*RemapFunc)(const Mat& _src, Mat& _dst, const Mat& _xy, int borderType, const Scalar& _borderValue); } - + void cv::remap( InputArray _src, OutputArray _dst, InputArray _map1, InputArray _map2, int interpolation, int borderType, const Scalar& borderValue ) @@ -2449,9 +2449,9 @@ void cv::remap( InputArray _src, OutputArray _dst, }; Mat src = _src.getMat(), map1 = _map1.getMat(), map2 = _map2.getMat(); - + CV_Assert( (!map2.data || map2.size() == map1.size())); - + _dst.create( map1.size(), src.type() ); Mat dst = _dst.getMat(); if( dst.data == src.data ) @@ -2703,7 +2703,7 @@ void cv::convertMaps( InputArray _map1, InputArray _map2, CV_Assert( dstm1type == CV_16SC2 || dstm1type == CV_32FC1 || dstm1type == CV_32FC2 ); _dstmap1.create( size, dstm1type ); dstmap1 = _dstmap1.getMat(); - + if( !nninterpolate && dstm1type != CV_32FC2 ) { _dstmap2.create( size, dstm1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 ); @@ -2855,7 +2855,7 @@ void cv::warpAffine( InputArray _src, OutputArray _dst, if (borderType == BORDER_REPLICATE) { if( tegra::warpAffine(src, dst, M, interpolation, borderType, borderValue) ) - return; + return; } else { @@ -2979,7 +2979,7 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, Mat src = _src.getMat(), M0 = _M0.getMat(); _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() ); Mat dst = _dst.getMat(); - + CV_Assert( src.cols > 0 && src.rows > 0 ); if( dst.data == src.data ) src = src.clone(); @@ -2999,19 +2999,6 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, invert(matM, matM); #ifdef HAVE_TEGRA_OPTIMIZATION - // if (borderType == BORDER_REPLICATE) - // { - // if( tegra::warpPerspective(src, dst, M, interpolation, borderType, borderValue) ) - // return; - // } - // else - // { - // double warp_mat[9]; - // Mat warp_m(3, 3, CV_64F, warp_mat); - // M0.convertTo(warp_m, warp_m.type()); - // if( tegra::warpPerspective(src, dst, warp_mat, interpolation, borderType, borderValue) ) - // return; - // } if( tegra::warpPerspective(src, dst, M, interpolation, borderType, borderValue) ) return; #endif @@ -3048,7 +3035,7 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W)); int X = saturate_cast(fX); int Y = saturate_cast(fY); - + xy[x1*2] = saturate_cast(X); xy[x1*2+1] = saturate_cast(Y); } @@ -3063,7 +3050,7 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W)); int X = saturate_cast(fX); int Y = saturate_cast(fY); - + xy[x1*2] = saturate_cast(X >> INTER_BITS); xy[x1*2+1] = saturate_cast(Y >> INTER_BITS); alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + @@ -3195,26 +3182,26 @@ cv::Mat cv::getAffineTransform( const Point2f src[], const Point2f dst[] ) solve( A, B, X ); return M; } - + void cv::invertAffineTransform(InputArray _matM, OutputArray __iM) { Mat matM = _matM.getMat(); CV_Assert(matM.rows == 2 && matM.cols == 3); __iM.create(2, 3, matM.type()); Mat _iM = __iM.getMat(); - + if( matM.type() == CV_32F ) { const float* M = (const float*)matM.data; float* iM = (float*)_iM.data; int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0])); - + 
         double D = M[0]*M[step+1] - M[1]*M[step];
         D = D != 0 ? 1./D : 0;
         double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
         double b1 = -A11*M[2] - A12*M[step+2];
         double b2 = -A21*M[2] - A22*M[step+2];
-        
+
         iM[0] = (float)A11; iM[1] = (float)A12; iM[2] = (float)b1;
         iM[istep] = (float)A21; iM[istep+1] = (float)A22; iM[istep+2] = (float)b2;
     }
@@ -3223,19 +3210,19 @@ void cv::invertAffineTransform(InputArray _matM, OutputArray __iM)
         const double* M = (const double*)matM.data;
         double* iM = (double*)_iM.data;
         int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0]));
-        
+
         double D = M[0]*M[step+1] - M[1]*M[step];
         D = D != 0 ? 1./D : 0;
         double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D;
         double b1 = -A11*M[2] - A12*M[step+2];
         double b2 = -A21*M[2] - A22*M[step+2];
-        
+
         iM[0] = A11; iM[1] = A12; iM[2] = b1;
         iM[istep] = A21; iM[istep+1] = A22; iM[istep+2] = b2;
     }
     else
         CV_Error( CV_StsUnsupportedFormat, "" );
-}    
+}
 
 cv::Mat cv::getPerspectiveTransform(InputArray _src, InputArray _dst)
 {
diff --git a/modules/refman.rst b/modules/refman.rst
deleted file mode 100644
index dc15ab9d8e..0000000000
--- a/modules/refman.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-############################
-OpenCV API Reference
-############################
-
-.. toctree::
-   :maxdepth: 2
-
-   core/doc/intro.rst
-   core/doc/core.rst
-   imgproc/doc/imgproc.rst
-   highgui/doc/highgui.rst
-   video/doc/video.rst
-   calib3d/doc/calib3d.rst
-   features2d/doc/features2d.rst
-   objdetect/doc/objdetect.rst
-   ml/doc/ml.rst
-   flann/doc/flann.rst
-   gpu/doc/gpu.rst
-   photo/doc/photo.rst
-   stitching/doc/stitching.rst
-   nonfree/doc/nonfree.rst
-   contrib/doc/contrib.rst
-   legacy/doc/legacy.rst
-
diff --git a/modules/refman.rst.in b/modules/refman.rst.in
new file mode 100644
index 0000000000..e039c09750
--- /dev/null
+++ b/modules/refman.rst.in
@@ -0,0 +1,9 @@
+############################
+OpenCV API Reference
+############################
+
+.. toctree::
+   :maxdepth: 2
+
+   core/doc/intro.rst
+@OPENCV_REFMAN_TOC@