diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index 4928df65d1..37c5e7089a 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -2842,6 +2842,76 @@ unit length. */ CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t ); +/** @brief Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using cheirality check. Returns the number of +inliers that pass the check. + +@param points1 Array of N 2D points from the first image. The point coordinates should be +floating-point (single or double precision). +@param points2 Array of the second image points of the same size and format as points1. +@param cameraMatrix1 Input/output camera matrix for the first camera, the same as in +@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. +@param distCoeffs1 Input/output vector of distortion coefficients, the same as in +@ref calibrateCamera. +@param cameraMatrix2 Input/output camera matrix for the second camera, the same as in +@ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. +@param distCoeffs2 Input/output vector of distortion coefficients, the same as in +@ref calibrateCamera. +@param E The output essential matrix. +@param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple +that performs a change of basis from the first camera's coordinate system to the second camera's +coordinate system. Note that, in general, t can not be used for this tuple, see the parameter +described below. +@param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and +therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit +length. 
+@param method Method for computing an essential matrix. +- @ref RANSAC for the RANSAC algorithm. +- @ref LMEDS for the LMedS algorithm. +@param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of +confidence (probability) that the estimated matrix is correct. +@param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar +line in pixels, beyond which the point is considered an outlier and is not used for computing the +final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the +point localization, image resolution, and the image noise. +@param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks +inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to +recover pose. In the output mask only inliers which pass the cheirality check are marked. + +This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies +possible pose hypotheses by doing cheirality check. The cheirality check means that the +triangulated 3D points should have positive depth. Some details can be found in @cite Nister03. + +This function can be used to process the output E and mask from @ref findEssentialMat. In this +scenario, points1 and points2 are the same input for findEssentialMat: +@code + // Example. Estimation of the essential matrix using the RANSAC algorithm + int point_count = 100; + vector<Point2f> points1(point_count); + vector<Point2f> points2(point_count); + + // initialize the points here ... + for( int i = 0; i < point_count; i++ ) + { + points1[i] = ...; + points2[i] = ...; + } + + // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration. + Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; + + // Output: Essential matrix, relative rotation and relative translation. 
+ Mat E, R, t, mask; + + recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask); +@endcode + */ +CV_EXPORTS_W int recoverPose( InputArray points1, InputArray points2, + InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + OutputArray E, OutputArray R, OutputArray t, + int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, + InputOutputArray mask = noArray()); + /** @brief Recovers the relative camera rotation and the translation from an estimated essential matrix and the corresponding points in two images, using cheirality check. Returns the number of inliers that pass the check. diff --git a/modules/calib3d/src/five-point.cpp b/modules/calib3d/src/five-point.cpp index d005248535..d947334142 100644 --- a/modules/calib3d/src/five-point.cpp +++ b/modules/calib3d/src/five-point.cpp @@ -401,6 +401,29 @@ protected: } }; +// Find essential matrix given undistorted points and two cameras. +static Mat findEssentialMat_( InputArray _points1, InputArray _points2, + InputArray cameraMatrix1, InputArray cameraMatrix2, + int method, double prob, double threshold, OutputArray _mask) +{ + // Scale the points back. We use "arithmetic mean" between the supplied two camera matrices. + // Thanks to such 2-stage procedure RANSAC threshold still makes sense, because the undistorted + // and rescaled points have a similar value range to the original ones. + Mat _pointsTransformed1, _pointsTransformed2; + Mat cm1 = cameraMatrix1.getMat(), cm2 = cameraMatrix2.getMat(), cm0; + Mat(cm1 + cm2).convertTo(cm0, CV_64F, 0.5); + CV_Assert(cm0.rows == 3 && cm0.cols == 3); + CV_Assert(std::abs(cm0.at<double>(2, 0)) < 1e-3 && + std::abs(cm0.at<double>(2, 1)) < 1e-3 && + std::abs(cm0.at<double>(2, 2) - 1.) 
< 1e-3); + Mat affine = cm0.rowRange(0, 2); + + transform(_points1, _pointsTransformed1, affine); + transform(_points2, _pointsTransformed2, affine); + + return findEssentialMat(_pointsTransformed1, _pointsTransformed2, cm0, method, prob, threshold, _mask); +} + } // Input should be a vector of n 2D points or a Nx2 matrix @@ -489,25 +512,10 @@ cv::Mat cv::findEssentialMat( InputArray _points1, InputArray _points2, CV_INSTRUMENT_REGION(); // Undistort image points, bring them to 3x3 identity "camera matrix" - Mat _pointsUntistorted1, _pointsUntistorted2; - undistortPoints(_points1, _pointsUntistorted1, cameraMatrix1, distCoeffs1); - undistortPoints(_points2, _pointsUntistorted2, cameraMatrix2, distCoeffs2); - - // Scale the points back. We use "arithmetic mean" between the supplied two camera matrices. - // Thanks to such 2-stage procedure RANSAC threshold still makes sense, because the undistorted - // and rescaled points have a similar value range to the original ones. - Mat cm1 = cameraMatrix1.getMat(), cm2 = cameraMatrix2.getMat(), cm0; - Mat(cm1 + cm2).convertTo(cm0, CV_64F, 0.5); - CV_Assert(cm0.rows == 3 && cm0.cols == 3); - CV_Assert(std::abs(cm0.at<double>(2, 0)) < 1e-3 && - std::abs(cm0.at<double>(2, 1)) < 1e-3 && - std::abs(cm0.at<double>(2, 2) - 1.) 
< 1e-3); - Mat affine = cm0.rowRange(0, 2); - - transform(_pointsUntistorted1, _pointsUntistorted1, affine); - transform(_pointsUntistorted2, _pointsUntistorted2, affine); - - return findEssentialMat(_pointsUntistorted1, _pointsUntistorted2, cm0, method, prob, threshold, _mask); + Mat _pointsUndistorted1, _pointsUndistorted2; + undistortPoints(_points1, _pointsUndistorted1, cameraMatrix1, distCoeffs1); + undistortPoints(_points2, _pointsUndistorted2, cameraMatrix2, distCoeffs2); + return findEssentialMat_(_pointsUndistorted1, _pointsUndistorted2, cameraMatrix1, cameraMatrix2, method, prob, threshold, _mask); } cv::Mat cv::findEssentialMat( InputArray points1, InputArray points2, @@ -524,6 +532,30 @@ cv::Mat cv::findEssentialMat( InputArray points1, InputArray points2, } +int cv::recoverPose( InputArray _points1, InputArray _points2, + InputArray cameraMatrix1, InputArray distCoeffs1, + InputArray cameraMatrix2, InputArray distCoeffs2, + OutputArray E, OutputArray R, OutputArray t, + int method, double prob, double threshold, + InputOutputArray _mask) +{ + CV_INSTRUMENT_REGION(); + + // Undistort image points, bring them to 3x3 identity "camera matrix" + Mat _pointsUndistorted1, _pointsUndistorted2; + undistortPoints(_points1, _pointsUndistorted1, cameraMatrix1, distCoeffs1); + undistortPoints(_points2, _pointsUndistorted2, cameraMatrix2, distCoeffs2); + + // Get essential matrix. 
+ Mat _E = findEssentialMat_(_pointsUndistorted1, _pointsUndistorted2, cameraMatrix1, cameraMatrix2, + method, prob, threshold, _mask); + CV_Assert(_E.cols == 3 && _E.rows == 3); + E.create(3, 3, _E.type()); + _E.copyTo(E); + + return recoverPose(_E, _pointsUndistorted1, _pointsUndistorted2, Mat::eye(3,3, CV_64F), R, t, _mask); +} + int cv::recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix, OutputArray _R, OutputArray _t, double distanceThresh, InputOutputArray _mask, OutputArray triangulatedPoints) diff --git a/modules/calib3d/test/test_cameracalibration.cpp b/modules/calib3d/test/test_cameracalibration.cpp index 9f3663e8cc..10350527fa 100644 --- a/modules/calib3d/test/test_cameracalibration.cpp +++ b/modules/calib3d/test/test_cameracalibration.cpp @@ -2142,7 +2142,17 @@ TEST(CV_RecoverPoseTest, regression_15341) // camera matrix with both focal lengths = 1, and principal point = (0, 0) const Mat cameraMatrix = Mat::eye(3, 3, CV_64F); - const Mat zeroDistCoeffs = Mat::zeros(1, 5, CV_64F); + + // camera matrix with focal lengths 0.5 and 0.6 respectively and principal point = (100, 200) + double cameraMatrix2Data[] = { 0.5, 0, 100, + 0, 0.6, 200, + 0, 0, 1 }; + const Mat cameraMatrix2( 3, 3, CV_64F, cameraMatrix2Data ); + + // zero and nonzero distortion coefficients + double nonZeroDistCoeffsData[] = { 0.01, 0.0001, 0, 0, 1e-04, 0.2, 0.02, 0.0002 }; // k1, k2, p1, p2, k3, k4, k5, k6 + vector<Mat> distCoeffsList = {Mat::zeros(1, 5, CV_64F), Mat{1, 8, CV_64F, nonZeroDistCoeffsData}}; + const auto &zeroDistCoeffs = distCoeffsList[0]; int Inliers = 0; @@ -2158,14 +2168,26 @@ TEST(CV_RecoverPoseTest, regression_15341) // Estimation of fundamental matrix using the RANSAC algorithm Mat E, E2, R, t; + + // Check pose when camera matrices are different. 
+ for (const auto &distCoeffs: distCoeffsList) + { + E = findEssentialMat(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, RANSAC, 0.999, 1.0, mask); + recoverPose(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, E2, R, t, RANSAC, 0.999, 1.0, mask); + EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) << + "Two big difference between the same essential matrices computed using different functions with different cameras, testcase " << testcase; + EXPECT_EQ(0, (int)mask[13]) << "Detecting outliers in function failed with different cameras, testcase " << testcase; + } + + // Check pose when camera matrices are the same. E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask); E2 = findEssentialMat(points1, points2, cameraMatrix, zeroDistCoeffs, cameraMatrix, zeroDistCoeffs, RANSAC, 0.999, 1.0, mask); EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) << - "Two big difference between the same essential matrices computed using different functions, testcase " << testcase; - EXPECT_EQ(0, (int)mask[13]) << "Detecting outliers in function findEssentialMat failed, testcase " << testcase; + "Two big difference between the same essential matrices computed using different functions with same cameras, testcase " << testcase; + EXPECT_EQ(0, (int)mask[13]) << "Detecting outliers in function findEssentialMat failed with same cameras, testcase " << testcase; points2[12] = Point2f(0.0f, 0.0f); // provoke another outlier detection for recover Pose Inliers = recoverPose(E, points1, points2, cameraMatrix, R, t, mask); - EXPECT_EQ(0, (int)mask[12]) << "Detecting outliers in function failed, testcase " << testcase; + EXPECT_EQ(0, (int)mask[12]) << "Detecting outliers in function failed with same cameras, testcase " << testcase; } else // testcase with mat input data { @@ -2185,14 +2207,26 @@ TEST(CV_RecoverPoseTest, regression_15341) // Estimation of fundamental matrix using the RANSAC algorithm Mat E, E2, R, t; + + // Check pose when 
camera matrices are different. + for (const auto &distCoeffs: distCoeffsList) + { + E = findEssentialMat(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, RANSAC, 0.999, 1.0, mask); + recoverPose(points1, points2, cameraMatrix, distCoeffs, cameraMatrix2, distCoeffs, E2, R, t, RANSAC, 0.999, 1.0, mask); + EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) << + "Two big difference between the same essential matrices computed using different functions with different cameras, testcase " << testcase; + EXPECT_EQ(0, (int)mask.at<unsigned char>(13)) << "Detecting outliers in function failed with different cameras, testcase " << testcase; + } + + // Check pose when camera matrices are the same. E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask); E2 = findEssentialMat(points1, points2, cameraMatrix, zeroDistCoeffs, cameraMatrix, zeroDistCoeffs, RANSAC, 0.999, 1.0, mask); EXPECT_LT(cv::norm(E, E2, NORM_INF), 1e-4) << - "Two big difference between the same essential matrices computed using different functions, testcase " << testcase; - EXPECT_EQ(0, (int)mask.at<unsigned char>(13)) << "Detecting outliers in function findEssentialMat failed, testcase " << testcase; + "Two big difference between the same essential matrices computed using different functions with same cameras, testcase " << testcase; + EXPECT_EQ(0, (int)mask.at<unsigned char>(13)) << "Detecting outliers in function findEssentialMat failed with same cameras, testcase " << testcase; points2.at<Point2f>(12) = Point2f(0.0f, 0.0f); // provoke an outlier detection Inliers = recoverPose(E, points1, points2, cameraMatrix, R, t, mask); - EXPECT_EQ(0, (int)mask.at<unsigned char>(12)) << "Detecting outliers in function failed, testcase " << testcase; + EXPECT_EQ(0, (int)mask.at<unsigned char>(12)) << "Detecting outliers in function failed with same cameras, testcase " << testcase; } EXPECT_EQ(Inliers, point_count - invalid_point_count) << "Number of inliers differs from expected number of inliers, testcase " << testcase;