From d2e08a524e11685f7a76f022d6ee7a2c5072be7c Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 15 Aug 2018 14:55:47 +0300 Subject: [PATCH 1/8] core: repair CV_Assert() messages Multi-argument CV_Assert() is accessible via CV_Assert_N() (with malformed messages). --- modules/core/include/opencv2/core/base.hpp | 36 +++++++++++------- modules/core/include/opencv2/core/cvdef.h | 2 + modules/core/src/matmul.cpp | 38 +++++++++---------- .../dnn/include/opencv2/dnn/shape_utils.hpp | 2 +- modules/dnn/src/caffe/caffe_importer.cpp | 2 +- modules/dnn/src/dnn.cpp | 16 ++++---- modules/dnn/src/layers/batch_norm_layer.cpp | 2 +- modules/dnn/src/layers/convolution_layer.cpp | 13 ++++--- .../dnn/src/layers/crop_and_resize_layer.cpp | 6 +-- modules/dnn/src/layers/eltwise_layer.cpp | 2 +- modules/dnn/src/layers/padding_layer.cpp | 6 +-- modules/dnn/src/layers/pooling_layer.cpp | 9 +++-- modules/dnn/src/layers/prior_box_layer.cpp | 5 ++- modules/dnn/src/layers/reshape_layer.cpp | 2 +- modules/dnn/src/layers/resize_layer.cpp | 6 +-- modules/dnn/src/layers/scale_layer.cpp | 12 +++--- modules/dnn/src/nms.cpp | 4 +- .../src/tensorflow/tf_graph_simplifier.cpp | 6 +-- modules/dnn/src/tensorflow/tf_importer.cpp | 25 ++++++------ modules/dnn/src/torch/torch_importer.cpp | 4 +- modules/dnn/test/test_layers.cpp | 2 +- modules/highgui/src/window_w32.cpp | 2 +- samples/dnn/classification.cpp | 9 ++++- samples/dnn/custom_layers.hpp | 2 +- samples/dnn/segmentation.cpp | 9 ++++- samples/dnn/text_detection.cpp | 15 ++++++-- 26 files changed, 136 insertions(+), 101 deletions(-) diff --git a/modules/core/include/opencv2/core/base.hpp b/modules/core/include/opencv2/core/base.hpp index 2e8e5d5e86..0342ebde52 100644 --- a/modules/core/include/opencv2/core/base.hpp +++ b/modules/core/include/opencv2/core/base.hpp @@ -444,7 +444,13 @@ for example: */ #define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ ) -#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ) +/** @brief Checks a condition at runtime and throws exception if it fails + +The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros +raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release +configurations while CV_DbgAssert is only retained in the Debug configuration. +*/ +#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0) //! @cond IGNORED #define CV__ErrorNoReturn( code, msg ) cv::errorNoReturn( code, msg, CV_Func, __FILE__, __LINE__ ) @@ -454,8 +460,8 @@ for example: #define CV_Error CV__ErrorNoReturn #undef CV_Error_ #define CV_Error_ CV__ErrorNoReturn_ -#undef CV_Assert_1 -#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::errorNoReturn( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ) +#undef CV_Assert +#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::errorNoReturn( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0) #else // backward compatibility #define CV_ErrorNoReturn CV__ErrorNoReturn @@ -465,6 +471,13 @@ for example: #endif // CV_STATIC_ANALYSIS +//! 
@cond IGNORED + +#ifdef OPENCV_FORCE_MULTIARG_ASSERT_CHECK +#define CV_Assert_1( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0) +#else +#define CV_Assert_1 CV_Assert +#endif #define CV_Assert_2( expr1, expr2 ) CV_Assert_1(expr1); CV_Assert_1(expr2) #define CV_Assert_3( expr1, expr2, expr3 ) CV_Assert_2(expr1, expr2); CV_Assert_1(expr3) #define CV_Assert_4( expr1, expr2, expr3, expr4 ) CV_Assert_3(expr1, expr2, expr3); CV_Assert_1(expr4) @@ -475,21 +488,18 @@ for example: #define CV_Assert_9( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ) CV_Assert_8(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8 ); CV_Assert_1(expr9) #define CV_Assert_10( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9, expr10 ) CV_Assert_9(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ); CV_Assert_1(expr10) -#define CV_VA_NUM_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N -#define CV_VA_NUM_ARGS(...) CV_VA_NUM_ARGS_HELPER(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define CV_Assert_N(...) do { __CV_CAT(CV_Assert_, __CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__); } while(0) -/** @brief Checks a condition at runtime and throws exception if it fails +#ifdef OPENCV_FORCE_MULTIARG_ASSERT_CHECK +#undef CV_Assert +#define CV_Assert CV_Assert_N +#endif +//! @endcond -The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros -raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release -configurations while CV_DbgAssert is only retained in the Debug configuration. -*/ -#define CV_Assert(...) do { CVAUX_CONCAT(CV_Assert_, CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__); } while(0) - -/** replaced with CV_Assert(expr) in Debug configuration */ #if defined _DEBUG || defined CV_STATIC_ANALYSIS # define CV_DbgAssert(expr) CV_Assert(expr) #else +/** replaced with CV_Assert(expr) in Debug configuration */ # define CV_DbgAssert(expr) #endif diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 2a3e4420d7..c0f76d1654 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -79,6 +79,8 @@ namespace cv { namespace debug_build_guard { } using namespace debug_build_guard #define __CV_CAT(x, y) __CV_CAT_(x, y) #endif +#define __CV_VA_NUM_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N +#define __CV_VA_NUM_ARGS(...) 
__CV_VA_NUM_ARGS_HELPER(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) // undef problematic defines sometimes defined by system headers (windows.h in particular) #undef small diff --git a/modules/core/src/matmul.cpp b/modules/core/src/matmul.cpp index 7f624b5f76..83607c7184 100644 --- a/modules/core/src/matmul.cpp +++ b/modules/core/src/matmul.cpp @@ -796,7 +796,7 @@ static bool ocl_gemm( InputArray matA, InputArray matB, double alpha, int depth = matA.depth(), cn = matA.channels(); int type = CV_MAKETYPE(depth, cn); - CV_Assert( type == matB.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) ); + CV_Assert_N( type == matB.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) ); const ocl::Device & dev = ocl::Device::getDefault(); bool doubleSupport = dev.doubleFPConfig() > 0; @@ -1555,7 +1555,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha, Size a_size = A.size(), d_size; int len = 0, type = A.type(); - CV_Assert( type == B.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) ); + CV_Assert_N( type == B.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) ); switch( flags & (GEMM_1_T|GEMM_2_T) ) { @@ -1583,7 +1583,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha, if( !C.empty() ) { - CV_Assert( C.type() == type, + CV_Assert_N( C.type() == type, (((flags&GEMM_3_T) == 0 && C.rows == d_size.height && C.cols == d_size.width) || ((flags&GEMM_3_T) != 0 && C.rows == d_size.width && C.cols == d_size.height))); } @@ -2457,7 +2457,7 @@ void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean, { CV_INSTRUMENT_REGION() - CV_Assert( data, nsamples > 0 ); + CV_Assert_N( data, nsamples > 0 ); Size size = data[0].size(); int sz = size.width * size.height, esz = (int)data[0].elemSize(); int type = data[0].type(); @@ -2480,7 +2480,7 @@ void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean, for( int i = 0; i < nsamples; i++ ) { - CV_Assert( data[i].size() == size, data[i].type() == type ); + CV_Assert_N( data[i].size() == size, data[i].type() == type ); if( data[i].isContinuous() ) memcpy( _data.ptr(i), data[i].ptr(), sz*esz ); else @@ -2516,7 +2516,7 @@ void cv::calcCovarMatrix( InputArray _src, OutputArray _covar, InputOutputArray int i = 0; for(std::vector::iterator each = src.begin(); each != src.end(); ++each, ++i ) { - CV_Assert( (*each).size() == size, (*each).type() == type ); + CV_Assert_N( (*each).size() == size, (*each).type() == type ); Mat dataRow(size.height, size.width, type, _data.ptr(i)); (*each).copyTo(dataRow); } @@ -2595,7 +2595,7 @@ double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar ) AutoBuffer buf(len); double result = 0; - CV_Assert( type == v2.type(), type == icovar.type(), + CV_Assert_N( type == v2.type(), type == icovar.type(), sz == v2.size(), len == icovar.rows && len == icovar.cols ); sz.width *= v1.channels(); @@ -2888,7 +2888,7 @@ void cv::mulTransposed( InputArray _src, OutputArray _dst, bool ata, if( !delta.empty() ) { - CV_Assert( delta.channels() == 1, + CV_Assert_N( delta.channels() == 1, (delta.rows == src.rows || delta.rows == 1), (delta.cols == src.cols || delta.cols == 1)); if( delta.type() != dtype ) @@ -3291,7 +3291,7 @@ double Mat::dot(InputArray _mat) const Mat mat = _mat.getMat(); int cn = channels(); DotProdFunc func = getDotProdFunc(depth()); - CV_Assert( mat.type() == type(), mat.size == size, func != 0 ); + 
CV_Assert_N( mat.type() == type(), mat.size == size, func != 0 ); if( isContinuous() && mat.isContinuous() ) { @@ -3327,7 +3327,7 @@ CV_IMPL void cvGEMM( const CvArr* Aarr, const CvArr* Barr, double alpha, if( Carr ) C = cv::cvarrToMat(Carr); - CV_Assert( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)), + CV_Assert_N( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)), (D.cols == ((flags & CV_GEMM_B_T) == 0 ? B.cols : B.rows)), D.type() == A.type() ); @@ -3350,7 +3350,7 @@ cvTransform( const CvArr* srcarr, CvArr* dstarr, m = _m; } - CV_Assert( dst.depth() == src.depth(), dst.channels() == m.rows ); + CV_Assert_N( dst.depth() == src.depth(), dst.channels() == m.rows ); cv::transform( src, dst, m ); } @@ -3360,7 +3360,7 @@ cvPerspectiveTransform( const CvArr* srcarr, CvArr* dstarr, const CvMat* mat ) { cv::Mat m = cv::cvarrToMat(mat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); - CV_Assert( dst.type() == src.type(), dst.channels() == m.rows-1 ); + CV_Assert_N( dst.type() == src.type(), dst.channels() == m.rows-1 ); cv::perspectiveTransform( src, dst, m ); } @@ -3370,7 +3370,7 @@ CV_IMPL void cvScaleAdd( const CvArr* srcarr1, CvScalar scale, { cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); - CV_Assert( src1.size == dst.size, src1.type() == dst.type() ); + CV_Assert_N( src1.size == dst.size, src1.type() == dst.type() ); cv::scaleAdd( src1, scale.val[0], cv::cvarrToMat(srcarr2), dst ); } @@ -3380,7 +3380,7 @@ cvCalcCovarMatrix( const CvArr** vecarr, int count, CvArr* covarr, CvArr* avgarr, int flags ) { cv::Mat cov0 = cv::cvarrToMat(covarr), cov = cov0, mean0, mean; - CV_Assert( vecarr != 0, count >= 1 ); + CV_Assert_N( vecarr != 0, count >= 1 ); if( avgarr ) mean = mean0 = cv::cvarrToMat(avgarr); @@ -3460,7 +3460,7 @@ cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigen int ecount0 = evals0.cols + evals0.rows - 1; int ecount = evals.cols + evals.rows - 1; - CV_Assert( (evals0.cols == 1 || evals0.rows == 1), + CV_Assert_N( (evals0.cols == 1 || evals0.rows == 1), ecount0 <= ecount, evects0.cols == evects.cols, evects0.rows == ecount0 ); @@ -3491,12 +3491,12 @@ cvProjectPCA( const CvArr* data_arr, const CvArr* avg_arr, int n; if( mean.rows == 1 ) { - CV_Assert(dst.cols <= evects.rows, dst.rows == data.rows); + CV_Assert_N(dst.cols <= evects.rows, dst.rows == data.rows); n = dst.cols; } else { - CV_Assert(dst.rows <= evects.rows, dst.cols == data.cols); + CV_Assert_N(dst.rows <= evects.rows, dst.cols == data.cols); n = dst.rows; } pca.eigenvectors = evects.rowRange(0, n); @@ -3522,12 +3522,12 @@ cvBackProjectPCA( const CvArr* proj_arr, const CvArr* avg_arr, int n; if( mean.rows == 1 ) { - CV_Assert(data.cols <= evects.rows, dst.rows == data.rows); + CV_Assert_N(data.cols <= evects.rows, dst.rows == data.rows); n = data.cols; } else { - CV_Assert(data.rows <= evects.rows, dst.cols == data.cols); + CV_Assert_N(data.rows <= evects.rows, dst.cols == data.cols); n = data.rows; } pca.eigenvectors = evects.rowRange(0, n); diff --git a/modules/dnn/include/opencv2/dnn/shape_utils.hpp b/modules/dnn/include/opencv2/dnn/shape_utils.hpp index 64811d8184..b0ed3afc54 100644 --- a/modules/dnn/include/opencv2/dnn/shape_utils.hpp +++ b/modules/dnn/include/opencv2/dnn/shape_utils.hpp @@ -209,7 +209,7 @@ inline Range clamp(const Range& r, int axisSize) { Range clamped(std::max(r.start, 0), r.end > 0 ? 
std::min(r.end, axisSize) : axisSize + r.end + 1); - CV_Assert(clamped.start < clamped.end, clamped.end <= axisSize); + CV_Assert_N(clamped.start < clamped.end, clamped.end <= axisSize); return clamped; } diff --git a/modules/dnn/src/caffe/caffe_importer.cpp b/modules/dnn/src/caffe/caffe_importer.cpp index 59f47eef1a..24e918d7dc 100644 --- a/modules/dnn/src/caffe/caffe_importer.cpp +++ b/modules/dnn/src/caffe/caffe_importer.cpp @@ -359,7 +359,7 @@ public: { if (!layerParams.get("use_global_stats", true)) { - CV_Assert(layer.bottom_size() == 1, layer.top_size() == 1); + CV_Assert_N(layer.bottom_size() == 1, layer.top_size() == 1); LayerParams mvnParams; mvnParams.set("eps", layerParams.get("eps", 1e-5)); diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index 43ad3d6d42..d8815a5f08 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -134,7 +134,7 @@ void blobFromImages(InputArrayOfArrays images_, OutputArray blob_, double scalef if (ddepth == CV_8U) { CV_CheckEQ(scalefactor, 1.0, "Scaling is not supported for CV_8U blob depth"); - CV_Assert(mean_ == Scalar(), "Mean subtraction is not supported for CV_8U blob depth"); + CV_Assert(mean_ == Scalar() && "Mean subtraction is not supported for CV_8U blob depth"); } std::vector images; @@ -451,8 +451,8 @@ struct DataLayer : public Layer { double scale = scaleFactors[i]; Scalar& mean = means[i]; - CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4, - outputs[i].type() == CV_32F); + CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4); + CV_CheckTypeEQ(outputs[i].type(), CV_32FC1, ""); bool singleMean = true; for (int j = 1; j < std::min(4, inputsData[i].size[1]) && singleMean; ++j) @@ -569,7 +569,7 @@ struct DataLayer : public Layer void finalize(const std::vector&, std::vector& outputs) CV_OVERRIDE { - CV_Assert(outputs.size() == scaleFactors.size(), outputs.size() == means.size(), + CV_Assert_N(outputs.size() == scaleFactors.size(), outputs.size() == means.size(), inputsData.size() == outputs.size()); skip = true; for (int i = 0; skip && i < inputsData.size(); ++i) @@ -1237,7 +1237,7 @@ struct Net::Impl void initHalideBackend() { CV_TRACE_FUNCTION(); - CV_Assert(preferableBackend == DNN_BACKEND_HALIDE, haveHalide()); + CV_Assert_N(preferableBackend == DNN_BACKEND_HALIDE, haveHalide()); // Iterator to current layer. MapIdToLayerData::iterator it = layers.begin(); @@ -1330,7 +1330,7 @@ struct Net::Impl void initInfEngineBackend() { CV_TRACE_FUNCTION(); - CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine()); + CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine()); #ifdef HAVE_INF_ENGINE MapIdToLayerData::iterator it; Ptr net; @@ -1827,7 +1827,7 @@ struct Net::Impl // To prevent memory collisions (i.e. when input of // [conv] and output of [eltwise] is the same blob) // we allocate a new blob. - CV_Assert(ld.outputBlobs.size() == 1, ld.outputBlobsWrappers.size() == 1); + CV_Assert_N(ld.outputBlobs.size() == 1, ld.outputBlobsWrappers.size() == 1); ld.outputBlobs[0] = ld.outputBlobs[0].clone(); ld.outputBlobsWrappers[0] = wrap(ld.outputBlobs[0]); @@ -1984,7 +1984,7 @@ struct Net::Impl } // Layers that refer old input Mat will refer to the // new data but the same Mat object. 
- CV_Assert(curr_output.data == output_slice.data, oldPtr == &curr_output); + CV_Assert_N(curr_output.data == output_slice.data, oldPtr == &curr_output); } ld.skip = true; printf_(("\toptimized out Concat layer %s\n", concatLayer->name.c_str())); diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp index 1ced532fdc..c3a54c127d 100644 --- a/modules/dnn/src/layers/batch_norm_layer.cpp +++ b/modules/dnn/src/layers/batch_norm_layer.cpp @@ -48,7 +48,7 @@ public: float varMeanScale = 1.f; if (!hasWeights && !hasBias && blobs.size() > 2 && useGlobalStats) { - CV_Assert(blobs.size() == 3, blobs[2].type() == CV_32F); + CV_Assert(blobs.size() == 3); CV_CheckTypeEQ(blobs[2].type(), CV_32FC1, ""); varMeanScale = blobs[2].at(0); if (varMeanScale != 0) varMeanScale = 1/varMeanScale; diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index 08760ab49a..02f5ac8d60 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -349,8 +349,8 @@ public: // (conv(I) + b1 ) * w + b2 // means to replace convolution's weights to [w*conv(I)] and bias to [b1 * w + b2] const int outCn = weightsMat.size[0]; - CV_Assert(!weightsMat.empty(), biasvec.size() == outCn + 2, - w.empty() || outCn == w.total(), b.empty() || outCn == b.total()); + CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2, + w.empty() || outCn == w.total(), b.empty() || outCn == b.total()); if (!w.empty()) { @@ -512,13 +512,14 @@ public: Size kernel, Size pad, Size stride, Size dilation, const ActivationLayer* activ, int ngroups, int nstripes ) { - CV_Assert( input.dims == 4 && output.dims == 4, + CV_Assert_N( + input.dims == 4 && output.dims == 4, input.size[0] == output.size[0], weights.rows == output.size[1], weights.cols == (input.size[1]/ngroups)*kernel.width*kernel.height, input.type() == output.type(), input.type() == weights.type(), - input.type() == CV_32F, + input.type() == CV_32FC1, input.isContinuous(), output.isContinuous(), biasvec.size() == (size_t)output.size[1]+2); @@ -1009,8 +1010,8 @@ public: name.c_str(), inputs[0]->size[0], inputs[0]->size[1], inputs[0]->size[2], inputs[0]->size[3], kernel.width, kernel.height, pad.width, pad.height, stride.width, stride.height, dilation.width, dilation.height);*/ - CV_Assert(inputs.size() == (size_t)1, inputs[0]->size[1] % blobs[0].size[1] == 0, - outputs.size() == 1, inputs[0]->data != outputs[0].data); + CV_Assert_N(inputs.size() == (size_t)1, inputs[0]->size[1] % blobs[0].size[1] == 0, + outputs.size() == 1, inputs[0]->data != outputs[0].data); int ngroups = inputs[0]->size[1]/blobs[0].size[1]; CV_Assert(outputs[0].size[1] % ngroups == 0); diff --git a/modules/dnn/src/layers/crop_and_resize_layer.cpp b/modules/dnn/src/layers/crop_and_resize_layer.cpp index ad2280f30c..f3aa7a8453 100644 --- a/modules/dnn/src/layers/crop_and_resize_layer.cpp +++ b/modules/dnn/src/layers/crop_and_resize_layer.cpp @@ -14,7 +14,7 @@ class CropAndResizeLayerImpl CV_FINAL : public CropAndResizeLayer public: CropAndResizeLayerImpl(const LayerParams& params) { - CV_Assert(params.has("width"), params.has("height")); + CV_Assert_N(params.has("width"), params.has("height")); outWidth = params.get("width"); outHeight = params.get("height"); } @@ -24,7 +24,7 @@ public: std::vector &outputs, std::vector &internals) const CV_OVERRIDE { - CV_Assert(inputs.size() == 2, inputs[0].size() == 4); + CV_Assert_N(inputs.size() == 2, inputs[0].size() == 4); if (inputs[0][0] 
!= 1) CV_Error(Error::StsNotImplemented, ""); outputs.resize(1, MatShape(4)); @@ -56,7 +56,7 @@ public: const int inpWidth = inp.size[3]; const int inpSpatialSize = inpHeight * inpWidth; const int outSpatialSize = outHeight * outWidth; - CV_Assert(inp.isContinuous(), out.isContinuous()); + CV_Assert_N(inp.isContinuous(), out.isContinuous()); for (int b = 0; b < boxes.rows; ++b) { diff --git a/modules/dnn/src/layers/eltwise_layer.cpp b/modules/dnn/src/layers/eltwise_layer.cpp index 3a2c0ddb3f..567d598416 100644 --- a/modules/dnn/src/layers/eltwise_layer.cpp +++ b/modules/dnn/src/layers/eltwise_layer.cpp @@ -139,7 +139,7 @@ public: const std::vector<float>& coeffs, EltwiseOp op, const ActivationLayer* activ, int nstripes) { - CV_Assert(1 < dst.dims && dst.dims <= 4, dst.type() == CV_32F, dst.isContinuous()); + CV_Check(dst.dims, 1 < dst.dims && dst.dims <= 4, ""); CV_CheckTypeEQ(dst.type(), CV_32FC1, ""); CV_Assert(dst.isContinuous()); CV_Assert(coeffs.empty() || coeffs.size() == (size_t)nsrcs); for( int i = 0; i < nsrcs; i++ ) diff --git a/modules/dnn/src/layers/padding_layer.cpp b/modules/dnn/src/layers/padding_layer.cpp index 266d887cd8..af58c78f55 100644 --- a/modules/dnn/src/layers/padding_layer.cpp +++ b/modules/dnn/src/layers/padding_layer.cpp @@ -38,7 +38,7 @@ public: { paddings[i].first = paddingsParam.get<int>(i * 2); // Pad before. paddings[i].second = paddingsParam.get<int>(i * 2 + 1); // Pad after. - CV_Assert(paddings[i].first >= 0, paddings[i].second >= 0); + CV_Assert_N(paddings[i].first >= 0, paddings[i].second >= 0); } } @@ -127,8 +127,8 @@ public: const int padBottom = outHeight - dstRanges[2].end; const int padLeft = dstRanges[3].start; const int padRight = outWidth - dstRanges[3].end; - CV_Assert(padTop < inpHeight, padBottom < inpHeight, - padLeft < inpWidth, padRight < inpWidth); + CV_CheckLT(padTop, inpHeight, ""); CV_CheckLT(padBottom, inpHeight, ""); + CV_CheckLT(padLeft, inpWidth, ""); CV_CheckLT(padRight, inpWidth, ""); for (size_t n = 0; n < inputs[0]->size[0]; ++n) { diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index 4e0fea21d8..573565d025 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -216,15 +216,15 @@ public: switch (type) { case MAX: - CV_Assert(inputs.size() == 1, outputs.size() == 2); + CV_Assert_N(inputs.size() == 1, outputs.size() == 2); maxPooling(*inputs[0], outputs[0], outputs[1]); break; case AVE: - CV_Assert(inputs.size() == 1, outputs.size() == 1); + CV_Assert_N(inputs.size() == 1, outputs.size() == 1); avePooling(*inputs[0], outputs[0]); break; case ROI: case PSROI: - CV_Assert(inputs.size() == 2, outputs.size() == 1); + CV_Assert_N(inputs.size() == 2, outputs.size() == 1); roiPooling(*inputs[0], *inputs[1], outputs[0]); break; default: @@ -311,7 +311,8 @@ public: Size stride, Size pad, bool avePoolPaddedArea, int poolingType, float spatialScale, bool computeMaxIdx, int nstripes) { - CV_Assert(src.isContinuous(), dst.isContinuous(), + CV_Assert_N( + src.isContinuous(), dst.isContinuous(), src.type() == CV_32F, src.type() == dst.type(), src.dims == 4, dst.dims == 4, ((poolingType == ROI || poolingType == PSROI) && dst.size[0] == rois.size[0] || src.size[0] == dst.size[0]), diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index 1e41585672..c1690f996f 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -254,7 +254,7 @@ public: } if (params.has("offset_h") 
|| params.has("offset_w")) { - CV_Assert(!params.has("offset"), params.has("offset_h"), params.has("offset_w")); + CV_Assert_N(!params.has("offset"), params.has("offset_h"), params.has("offset_w")); getParams("offset_h", params, &_offsetsY); getParams("offset_w", params, &_offsetsX); CV_Assert(_offsetsX.size() == _offsetsY.size()); @@ -299,7 +299,8 @@ public: void finalize(const std::vector &inputs, std::vector &outputs) CV_OVERRIDE { - CV_Assert(inputs.size() > 1, inputs[0]->dims == 4, inputs[1]->dims == 4); + CV_CheckGT(inputs.size(), (size_t)1, ""); + CV_CheckEQ(inputs[0]->dims, 4, ""); CV_CheckEQ(inputs[1]->dims, 4, ""); int layerWidth = inputs[0]->size[3]; int layerHeight = inputs[0]->size[2]; diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp index fdd33751f3..69814c0839 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -197,7 +197,7 @@ public: } else { - CV_Assert(inputs.size() == 2, total(inputs[0]) == total(inputs[1])); + CV_Assert_N(inputs.size() == 2, total(inputs[0]) == total(inputs[1])); outputs.assign(1, inputs[1]); } return true; diff --git a/modules/dnn/src/layers/resize_layer.cpp b/modules/dnn/src/layers/resize_layer.cpp index 78362da778..5ec5d40e54 100644 --- a/modules/dnn/src/layers/resize_layer.cpp +++ b/modules/dnn/src/layers/resize_layer.cpp @@ -43,7 +43,7 @@ public: std::vector &outputs, std::vector &internals) const CV_OVERRIDE { - CV_Assert(inputs.size() == 1, inputs[0].size() == 4); + CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4); outputs.resize(1, inputs[0]); outputs[0][2] = outHeight > 0 ? outHeight : (outputs[0][2] * zoomFactorHeight); outputs[0][3] = outWidth > 0 ? outWidth : (outputs[0][3] * zoomFactorWidth); @@ -106,7 +106,7 @@ public: const int inpSpatialSize = inpHeight * inpWidth; const int outSpatialSize = outHeight * outWidth; const int numPlanes = inp.size[0] * inp.size[1]; - CV_Assert(inp.isContinuous(), out.isContinuous()); + CV_Assert_N(inp.isContinuous(), out.isContinuous()); Mat inpPlanes = inp.reshape(1, numPlanes * inpHeight); Mat outPlanes = out.reshape(1, numPlanes * outHeight); @@ -184,7 +184,7 @@ public: std::vector &outputs, std::vector &internals) const CV_OVERRIDE { - CV_Assert(inputs.size() == 1, inputs[0].size() == 4); + CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4); outputs.resize(1, inputs[0]); outputs[0][2] = outHeight > 0 ? outHeight : (1 + zoomFactorHeight * (outputs[0][2] - 1)); outputs[0][3] = outWidth > 0 ? outWidth : (1 + zoomFactorWidth * (outputs[0][3] - 1)); diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp index 3b53805e1e..9ab005ce20 100644 --- a/modules/dnn/src/layers/scale_layer.cpp +++ b/modules/dnn/src/layers/scale_layer.cpp @@ -64,7 +64,7 @@ public: { CV_TRACE_FUNCTION(); CV_TRACE_ARG_VALUE(name, "name", name.c_str()); - CV_Assert(outputs.size() == 1, !blobs.empty() || inputs.size() == 2); + CV_Assert_N(outputs.size() == 1, !blobs.empty() || inputs.size() == 2); Mat &inpBlob = *inputs[0]; Mat &outBlob = outputs[0]; @@ -76,7 +76,9 @@ public: weights = weights.reshape(1, 1); MatShape inpShape = shape(inpBlob); const int numWeights = !weights.empty() ? 
weights.total() : bias.total(); - CV_Assert(numWeights != 0, !hasWeights || !hasBias || weights.total() == bias.total()); + CV_Assert(numWeights != 0); + if (hasWeights && hasBias) + CV_CheckEQ(weights.total(), bias.total(), "Incompatible weights/bias blobs"); int endAxis; for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis) @@ -84,9 +86,9 @@ public: if (total(inpShape, axis, endAxis) == numWeights) break; } - CV_Assert(total(inpShape, axis, endAxis) == numWeights, - !hasBias || numWeights == bias.total(), - inpBlob.type() == CV_32F && outBlob.type() == CV_32F); + CV_Assert(total(inpShape, axis, endAxis) == numWeights); + CV_Assert(!hasBias || numWeights == bias.total()); + CV_CheckTypeEQ(inpBlob.type(), CV_32FC1, ""); CV_CheckTypeEQ(outBlob.type(), CV_32FC1, ""); int numSlices = total(inpShape, 0, axis); float* inpData = (float*)inpBlob.data; diff --git a/modules/dnn/src/nms.cpp b/modules/dnn/src/nms.cpp index 62bda79c15..051a9cbd28 100644 --- a/modules/dnn/src/nms.cpp +++ b/modules/dnn/src/nms.cpp @@ -25,7 +25,7 @@ void NMSBoxes(const std::vector& bboxes, const std::vector& scores, const float score_threshold, const float nms_threshold, std::vector& indices, const float eta, const int top_k) { - CV_Assert(bboxes.size() == scores.size(), score_threshold >= 0, + CV_Assert_N(bboxes.size() == scores.size(), score_threshold >= 0, nms_threshold >= 0, eta > 0); NMSFast_(bboxes, scores, score_threshold, nms_threshold, eta, top_k, indices, rectOverlap); } @@ -46,7 +46,7 @@ void NMSBoxes(const std::vector& bboxes, const std::vector& const float score_threshold, const float nms_threshold, std::vector& indices, const float eta, const int top_k) { - CV_Assert(bboxes.size() == scores.size(), score_threshold >= 0, + CV_Assert_N(bboxes.size() == scores.size(), score_threshold >= 0, nms_threshold >= 0, eta > 0); NMSFast_(bboxes, scores, score_threshold, nms_threshold, eta, top_k, indices, rotatedRectIOU); } diff --git a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp index 241b8af790..a766d2a024 100644 --- a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp +++ b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp @@ -221,7 +221,7 @@ public: std::vector& inputNodes) CV_OVERRIDE { Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor()); - CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1); + CV_CheckEQ(epsMat.total(), (size_t)1, ""); CV_CheckTypeEQ(epsMat.type(), CV_32FC1, ""); fusedNode->mutable_input()->RemoveLast(); fusedNode->clear_attr(); @@ -256,7 +256,7 @@ public: std::vector& inputNodes) CV_OVERRIDE { Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor()); - CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1); + CV_CheckEQ(epsMat.total(), (size_t)1, ""); CV_CheckTypeEQ(epsMat.type(), CV_32FC1, ""); fusedNode->mutable_input()->RemoveLast(); fusedNode->clear_attr(); @@ -593,7 +593,7 @@ public: std::vector& inputNodes) CV_OVERRIDE { Mat factorsMat = getTensorContent(inputNodes[1]->attr().at("value").tensor()); - CV_Assert(factorsMat.total() == 2, factorsMat.type() == CV_32SC1); + CV_CheckEQ(factorsMat.total(), (size_t)2, ""); CV_CheckTypeEQ(factorsMat.type(), CV_32SC1, ""); // Height scale factor tensorflow::TensorProto* factorY = inputNodes[1]->mutable_attr()->at("value").mutable_tensor(); diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp index 98fb41ff94..97701a1826 100644 --- a/modules/dnn/src/tensorflow/tf_importer.cpp +++ 
b/modules/dnn/src/tensorflow/tf_importer.cpp @@ -545,8 +545,8 @@ const tensorflow::TensorProto& TFImporter::getConstBlob(const tensorflow::NodeDe } else { - CV_Assert(nodeIdx < netTxt.node_size(), - netTxt.node(nodeIdx).name() == kernel_inp.name); + CV_Assert_N(nodeIdx < netTxt.node_size(), + netTxt.node(nodeIdx).name() == kernel_inp.name); return netTxt.node(nodeIdx).attr().at("value").tensor(); } } @@ -587,8 +587,8 @@ static void addConstNodes(tensorflow::GraphDef& net, std::map& cons Mat qMin = getTensorContent(net.node(minId).attr().at("value").tensor()); Mat qMax = getTensorContent(net.node(maxId).attr().at("value").tensor()); - CV_Assert(qMin.total() == 1, qMin.type() == CV_32FC1, - qMax.total() == 1, qMax.type() == CV_32FC1); + CV_Assert_N(qMin.total() == 1, qMin.type() == CV_32FC1, + qMax.total() == 1, qMax.type() == CV_32FC1); Mat content = getTensorContent(*tensor); @@ -1295,8 +1295,9 @@ void TFImporter::populateNet(Net dstNet) CV_Assert(layer.input_size() == 3); Mat begins = getTensorContent(getConstBlob(layer, value_id, 1)); Mat sizes = getTensorContent(getConstBlob(layer, value_id, 2)); - CV_Assert(!begins.empty(), !sizes.empty(), begins.type() == CV_32SC1, - sizes.type() == CV_32SC1); + CV_Assert_N(!begins.empty(), !sizes.empty()); + CV_CheckTypeEQ(begins.type(), CV_32SC1, ""); + CV_CheckTypeEQ(sizes.type(), CV_32SC1, ""); if (begins.total() == 4 && getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC) { @@ -1665,7 +1666,7 @@ void TFImporter::populateNet(Net dstNet) if (layer.input_size() == 2) { Mat outSize = getTensorContent(getConstBlob(layer, value_id, 1)); - CV_Assert(outSize.type() == CV_32SC1, outSize.total() == 2); + CV_CheckTypeEQ(outSize.type(), CV_32SC1, ""); CV_CheckEQ(outSize.total(), (size_t)2, ""); layerParams.set("height", outSize.at(0, 0)); layerParams.set("width", outSize.at(0, 1)); } @@ -1673,8 +1674,8 @@ void TFImporter::populateNet(Net dstNet) { Mat factorHeight = getTensorContent(getConstBlob(layer, value_id, 1)); Mat factorWidth = getTensorContent(getConstBlob(layer, value_id, 2)); - CV_Assert(factorHeight.type() == CV_32SC1, factorHeight.total() == 1, - factorWidth.type() == CV_32SC1, factorWidth.total() == 1); + CV_CheckTypeEQ(factorHeight.type(), CV_32SC1, ""); CV_CheckEQ(factorHeight.total(), (size_t)1, ""); + CV_CheckTypeEQ(factorWidth.type(), CV_32SC1, ""); CV_CheckEQ(factorWidth.total(), (size_t)1, ""); layerParams.set("zoom_factor_x", factorWidth.at(0)); layerParams.set("zoom_factor_y", factorHeight.at(0)); } @@ -1772,7 +1773,7 @@ void TFImporter::populateNet(Net dstNet) CV_Assert(layer.input_size() == 3); Mat cropSize = getTensorContent(getConstBlob(layer, value_id, 2)); - CV_Assert(cropSize.type() == CV_32SC1, cropSize.total() == 2); + CV_CheckTypeEQ(cropSize.type(), CV_32SC1, ""); CV_CheckEQ(cropSize.total(), (size_t)2, ""); layerParams.set("height", cropSize.at(0)); layerParams.set("width", cropSize.at(1)); @@ -1826,8 +1827,8 @@ void TFImporter::populateNet(Net dstNet) Mat minValue = getTensorContent(getConstBlob(layer, value_id, 1)); Mat maxValue = getTensorContent(getConstBlob(layer, value_id, 2)); - CV_Assert(minValue.total() == 1, minValue.type() == CV_32F, - maxValue.total() == 1, maxValue.type() == CV_32F); + CV_CheckEQ(minValue.total(), (size_t)1, ""); CV_CheckTypeEQ(minValue.type(), CV_32FC1, ""); + CV_CheckEQ(maxValue.total(), (size_t)1, ""); CV_CheckTypeEQ(maxValue.type(), CV_32FC1, ""); layerParams.set("min_value", minValue.at(0)); layerParams.set("max_value", maxValue.at(0)); diff --git a/modules/dnn/src/torch/torch_importer.cpp 
b/modules/dnn/src/torch/torch_importer.cpp index 52bc0ce8a3..2338c73d96 100644 --- a/modules/dnn/src/torch/torch_importer.cpp +++ b/modules/dnn/src/torch/torch_importer.cpp @@ -896,8 +896,8 @@ struct TorchImporter else if (nnName == "SpatialZeroPadding" || nnName == "SpatialReflectionPadding") { readTorchTable(scalarParams, tensorParams); - CV_Assert(scalarParams.has("pad_l"), scalarParams.has("pad_r"), - scalarParams.has("pad_t"), scalarParams.has("pad_b")); + CV_Assert_N(scalarParams.has("pad_l"), scalarParams.has("pad_r"), + scalarParams.has("pad_t"), scalarParams.has("pad_b")); int padTop = scalarParams.get("pad_t"); int padLeft = scalarParams.get("pad_l"); int padRight = scalarParams.get("pad_r"); diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp index 77a326417c..93840fa20f 100644 --- a/modules/dnn/test/test_layers.cpp +++ b/modules/dnn/test/test_layers.cpp @@ -814,7 +814,7 @@ TEST_P(Layer_Test_DWconv_Prelu, Accuracy) const int group = 3; //outChannels=group when group>1 const int num_output = get<1>(GetParam()); const int kernel_depth = num_input/group; - CV_Assert(num_output >= group, num_output % group == 0, num_input % group == 0); + CV_Assert_N(num_output >= group, num_output % group == 0, num_input % group == 0); Net net; //layer 1: dwconv diff --git a/modules/highgui/src/window_w32.cpp b/modules/highgui/src/window_w32.cpp index 945b2e6e78..c6db001932 100644 --- a/modules/highgui/src/window_w32.cpp +++ b/modules/highgui/src/window_w32.cpp @@ -1500,7 +1500,7 @@ MainWindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam ) rgn = CreateRectRgn(0, 0, wrc.right, wrc.bottom); rgn1 = CreateRectRgn(cr.left, cr.top, cr.right, cr.bottom); rgn2 = CreateRectRgn(tr.left, tr.top, tr.right, tr.bottom); - CV_Assert(rgn != 0, rgn1 != 0, rgn2 != 0); + CV_Assert_N(rgn != 0, rgn1 != 0, rgn2 != 0); ret = CombineRgn(rgn, rgn, rgn1, RGN_DIFF); ret = CombineRgn(rgn, rgn, rgn2, RGN_DIFF); diff --git a/samples/dnn/classification.cpp b/samples/dnn/classification.cpp index 7f8aa74b83..42bdc20dd2 100644 --- a/samples/dnn/classification.cpp +++ b/samples/dnn/classification.cpp @@ -49,7 +49,6 @@ int main(int argc, char** argv) float scale = parser.get("scale"); Scalar mean = parser.get("mean"); bool swapRB = parser.get("rgb"); - CV_Assert(parser.has("width"), parser.has("height")); int inpWidth = parser.get("width"); int inpHeight = parser.get("height"); String model = parser.get("model"); @@ -72,7 +71,13 @@ int main(int argc, char** argv) } } - CV_Assert(parser.has("model")); + if (!parser.check()) + { + parser.printErrors(); + return 1; + } + CV_Assert(!model.empty()); + //! 
[Read and initialize network] Net net = readNet(model, config, framework); net.setPreferableBackend(backendId); diff --git a/samples/dnn/custom_layers.hpp b/samples/dnn/custom_layers.hpp index a18bb9a5cf..8a3d5d88c1 100644 --- a/samples/dnn/custom_layers.hpp +++ b/samples/dnn/custom_layers.hpp @@ -108,7 +108,7 @@ public: } else { - CV_Assert(blobs.size() == 2, blobs[0].total() == 1, blobs[1].total() == 1); + CV_Assert(blobs.size() == 2); CV_Assert(blobs[0].total() == 1); CV_Assert(blobs[1].total() == 1); factorHeight = blobs[0].at(0, 0); factorWidth = blobs[1].at(0, 0); outHeight = outWidth = 0; diff --git a/samples/dnn/segmentation.cpp b/samples/dnn/segmentation.cpp index ce2147acd6..70e8d7b5b4 100644 --- a/samples/dnn/segmentation.cpp +++ b/samples/dnn/segmentation.cpp @@ -57,7 +57,6 @@ int main(int argc, char** argv) float scale = parser.get("scale"); Scalar mean = parser.get("mean"); bool swapRB = parser.get("rgb"); - CV_Assert(parser.has("width"), parser.has("height")); int inpWidth = parser.get("width"); int inpHeight = parser.get("height"); String model = parser.get("model"); @@ -99,7 +98,13 @@ int main(int argc, char** argv) } } - CV_Assert(parser.has("model")); + if (!parser.check()) + { + parser.printErrors(); + return 1; + } + + CV_Assert(!model.empty()); //! [Read and initialize network] Net net = readNet(model, config, framework); net.setPreferableBackend(backendId); diff --git a/samples/dnn/text_detection.cpp b/samples/dnn/text_detection.cpp index f69d13f124..e7b0f237d3 100644 --- a/samples/dnn/text_detection.cpp +++ b/samples/dnn/text_detection.cpp @@ -33,9 +33,16 @@ int main(int argc, char** argv) float nmsThreshold = parser.get("nms"); int inpWidth = parser.get("width"); int inpHeight = parser.get("height"); - CV_Assert(parser.has("model")); String model = parser.get("model"); + if (!parser.check()) + { + parser.printErrors(); + return 1; + } + + CV_Assert(!model.empty()); + // Load network. Net net = readNet(model); @@ -113,9 +120,9 @@ void decode(const Mat& scores, const Mat& geometry, float scoreThresh, std::vector& detections, std::vector& confidences) { detections.clear(); - CV_Assert(scores.dims == 4, geometry.dims == 4, scores.size[0] == 1, - geometry.size[0] == 1, scores.size[1] == 1, geometry.size[1] == 5, - scores.size[2] == geometry.size[2], scores.size[3] == geometry.size[3]); + CV_Assert(scores.dims == 4); CV_Assert(geometry.dims == 4); CV_Assert(scores.size[0] == 1); + CV_Assert(geometry.size[0] == 1); CV_Assert(scores.size[1] == 1); CV_Assert(geometry.size[1] == 5); + CV_Assert(scores.size[2] == geometry.size[2]); CV_Assert(scores.size[3] == geometry.size[3]); const int height = scores.size[2]; const int width = scores.size[3]; From 67d46dfc6ce938b40cdd731d0af6b4f6b7bf14ab Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 20 Aug 2018 13:54:03 +0300 Subject: [PATCH 2/8] core(intrin): restrict FP16 operations Intrinsics must be effective, so don't declare FP16 type/operations if there is no native support. 
- CV_FP16: supports load/store into/from float32 - CV_SIMD_FP16: declares FP16 types and native FP16 operations --- .../core/include/opencv2/core/hal/intrin.hpp | 35 ++++++++- .../include/opencv2/core/hal/intrin_avx.hpp | 59 +++------------ .../include/opencv2/core/hal/intrin_neon.hpp | 74 +++++++++++++------ .../include/opencv2/core/hal/intrin_sse.hpp | 52 ++----------- modules/core/test/test_intrin_utils.hpp | 44 +++++++++-- 5 files changed, 137 insertions(+), 127 deletions(-) diff --git a/modules/core/include/opencv2/core/hal/intrin.hpp b/modules/core/include/opencv2/core/hal/intrin.hpp index ff2d5160d2..6505f255cb 100644 --- a/modules/core/include/opencv2/core/hal/intrin.hpp +++ b/modules/core/include/opencv2/core/hal/intrin.hpp @@ -204,6 +204,18 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN #define CV_SIMD512_64F 0 #endif +#ifndef CV_SIMD128_FP16 +#define CV_SIMD128_FP16 0 +#endif + +#ifndef CV_SIMD256_FP16 +#define CV_SIMD256_FP16 0 +#endif + +#ifndef CV_SIMD512_FP16 +#define CV_SIMD512_FP16 0 +#endif + //================================================================================================== #define CV_INTRIN_DEFINE_WIDE_INTRIN(typ, vtyp, short_typ, prefix, loadsfx) \ @@ -274,8 +286,8 @@ template struct V_RegTraits #if CV_SIMD128_64F CV_DEF_REG_TRAITS(v, v_float64x2, double, f64, v_float64x2, void, void, v_int64x2, v_int32x4); #endif -#if CV_FP16 - CV_DEF_REG_TRAITS(v, v_float16x8, short, f16, v_float32x4, void, void, v_int16x8, v_int16x8); +#if CV_SIMD128_FP16 + CV_DEF_REG_TRAITS(v, v_float16x8, short, f16, v_float16x8, void, void, v_int16x8, v_int16x8); #endif #endif @@ -290,8 +302,8 @@ template struct V_RegTraits CV_DEF_REG_TRAITS(v256, v_uint64x4, uint64, u64, v_uint64x4, void, void, v_int64x4, void); CV_DEF_REG_TRAITS(v256, v_int64x4, int64, s64, v_uint64x4, void, void, v_int64x4, void); CV_DEF_REG_TRAITS(v256, v_float64x4, double, f64, v_float64x4, void, void, v_int64x4, v_int32x8); -#if CV_FP16 - CV_DEF_REG_TRAITS(v256, v_float16x16, short, f16, v_float32x8, void, void, v_int16x16, void); +#if CV_SIMD256_FP16 + CV_DEF_REG_TRAITS(v256, v_float16x16, short, f16, v_float16x16, void, void, v_int16x16, void); #endif #endif @@ -309,6 +321,7 @@ using namespace CV__SIMD_NAMESPACE; namespace CV__SIMD_NAMESPACE { #define CV_SIMD 1 #define CV_SIMD_64F CV_SIMD256_64F + #define CV_SIMD_FP16 CV_SIMD256_FP16 #define CV_SIMD_WIDTH 32 typedef v_uint8x32 v_uint8; typedef v_int8x32 v_int8; @@ -323,6 +336,10 @@ namespace CV__SIMD_NAMESPACE { typedef v_float64x4 v_float64; #endif #if CV_FP16 + #define vx_load_fp16_f32 v256_load_fp16_f32 + #define vx_store_fp16 v_store_fp16 + #endif + #if CV_SIMD256_FP16 typedef v_float16x16 v_float16; CV_INTRIN_DEFINE_WIDE_INTRIN(short, v_float16, f16, v256, load_f16) #endif @@ -336,6 +353,7 @@ using namespace CV__SIMD_NAMESPACE; namespace CV__SIMD_NAMESPACE { #define CV_SIMD CV_SIMD128 #define CV_SIMD_64F CV_SIMD128_64F + #define CV_SIMD_FP16 CV_SIMD128_FP16 #define CV_SIMD_WIDTH 16 typedef v_uint8x16 v_uint8; typedef v_int8x16 v_int8; @@ -350,6 +368,10 @@ namespace CV__SIMD_NAMESPACE { typedef v_float64x2 v_float64; #endif #if CV_FP16 + #define vx_load_fp16_f32 v128_load_fp16_f32 + #define vx_store_fp16 v_store_fp16 + #endif + #if CV_SIMD128_FP16 typedef v_float16x8 v_float16; CV_INTRIN_DEFINE_WIDE_INTRIN(short, v_float16, f16, v, load_f16) #endif @@ -393,6 +415,11 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END #define CV_SIMD_64F 0 #endif +#ifndef CV_SIMD_FP16 +#define CV_SIMD_FP16 0 //!< Defined to 1 on native support of operations with float16x8_t / 
float16x16_t (SIMD256) types +#endif + + #ifndef CV_SIMD #define CV_SIMD 0 #endif diff --git a/modules/core/include/opencv2/core/hal/intrin_avx.hpp b/modules/core/include/opencv2/core/hal/intrin_avx.hpp index c64ff99f75..1c5ffbd1ca 100644 --- a/modules/core/include/opencv2/core/hal/intrin_avx.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_avx.hpp @@ -7,6 +7,7 @@ #define CV_SIMD256 1 #define CV_SIMD256_64F 1 +#define CV_SIMD256_FP16 0 // no native operations with FP16 type. Only load/store from float32x8 are available (if CV_FP16 == 1) namespace cv { @@ -262,26 +263,6 @@ struct v_float64x4 double get0() const { return _mm_cvtsd_f64(_mm256_castpd256_pd128(val)); } }; -struct v_float16x16 -{ - typedef short lane_type; - enum { nlanes = 16 }; - __m256i val; - - explicit v_float16x16(__m256i v) : val(v) {} - v_float16x16(short v0, short v1, short v2, short v3, - short v4, short v5, short v6, short v7, - short v8, short v9, short v10, short v11, - short v12, short v13, short v14, short v15) - { - val = _mm256_setr_epi16(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15); - } - v_float16x16() : val(_mm256_setzero_si256()) {} - short get0() const { return (short)_v_cvtsi256_si32(val); } -}; -inline v_float16x16 v256_setzero_f16() { return v_float16x16(_mm256_setzero_si256()); } -inline v_float16x16 v256_setall_f16(short val) { return v_float16x16(_mm256_set1_epi16(val)); } - //////////////// Load and store operations /////////////// #define OPENCV_HAL_IMPL_AVX_LOADSTORE(_Tpvec, _Tp) \ @@ -424,20 +405,18 @@ inline v_float64x4 v_reinterpret_as_f64(const v_float64x4& a) inline v_float64x4 v_reinterpret_as_f64(const v_float32x8& a) { return v_float64x4(_mm256_castps_pd(a.val)); } -inline v_float16x16 v256_load_f16(const short* ptr) -{ return v_float16x16(_mm256_loadu_si256((const __m256i*)ptr)); } -inline v_float16x16 v256_load_f16_aligned(const short* ptr) -{ return v_float16x16(_mm256_load_si256((const __m256i*)ptr)); } +#if CV_FP16 +inline v_float32x8 v256_load_fp16_f32(const short* ptr) +{ + return v_float32x8(_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr))); +} -inline v_float16x16 v256_load_f16_low(const short* ptr) -{ return v_float16x16(v256_load_low(ptr).val); } -inline v_float16x16 v256_load_f16_halves(const short* ptr0, const short* ptr1) -{ return v_float16x16(v256_load_halves(ptr0, ptr1).val); } - -inline void v_store(short* ptr, const v_float16x16& a) -{ _mm256_storeu_si256((__m256i*)ptr, a.val); } -inline void v_store_aligned(short* ptr, const v_float16x16& a) -{ _mm256_store_si256((__m256i*)ptr, a.val); } +inline void v_store_fp16(short* ptr, const v_float32x8& a) +{ + __m128i fp16_value = _mm256_cvtps_ph(a.val, 0); + _mm_store_si128((__m128i*)ptr, fp16_value); +} +#endif /* Recombine */ /*#define OPENCV_HAL_IMPL_AVX_COMBINE(_Tpvec, perm) \ @@ -1262,20 +1241,6 @@ inline v_float64x4 v_cvt_f64(const v_float32x8& a) inline v_float64x4 v_cvt_f64_high(const v_float32x8& a) { return v_float64x4(_mm256_cvtps_pd(_v256_extract_high(a.val))); } -#if CV_FP16 -inline v_float32x8 v_cvt_f32(const v_float16x16& a) -{ return v_float32x8(_mm256_cvtph_ps(_v256_extract_low(a.val))); } - -inline v_float32x8 v_cvt_f32_high(const v_float16x16& a) -{ return v_float32x8(_mm256_cvtph_ps(_v256_extract_high(a.val))); } - -inline v_float16x16 v_cvt_f16(const v_float32x8& a, const v_float32x8& b) -{ - __m128i ah = _mm256_cvtps_ph(a.val, 0), bh = _mm256_cvtps_ph(b.val, 0); - return v_float16x16(_mm256_inserti128_si256(_mm256_castsi128_si256(ah), bh, 1)); -} -#endif - ////////////// 
Lookup table access //////////////////// inline v_int32x8 v_lut(const int* tab, const v_int32x8& idxvec) diff --git a/modules/core/include/opencv2/core/hal/intrin_neon.hpp b/modules/core/include/opencv2/core/hal/intrin_neon.hpp index 73ca948e24..04b6ba2259 100644 --- a/modules/core/include/opencv2/core/hal/intrin_neon.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_neon.hpp @@ -62,6 +62,15 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN #define CV_SIMD128_64F 0 #endif +#ifndef CV_SIMD128_FP16 +# if CV_FP16 && (defined(__GNUC__) && __GNUC__ >= 5) // #12027: float16x8_t is missing in GCC 4.8.2 +# define CV_SIMD128_FP16 1 +# endif +#endif +#ifndef CV_SIMD128_FP16 +# define CV_SIMD128_FP16 0 +#endif + #if CV_SIMD128_64F #define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix) \ template static inline \ @@ -280,28 +289,9 @@ struct v_float64x2 #if CV_FP16 // Workaround for old compilers -static inline int16x8_t vreinterpretq_s16_f16(float16x8_t a) { return (int16x8_t)a; } -static inline float16x8_t vreinterpretq_f16_s16(int16x8_t a) { return (float16x8_t)a; } static inline int16x4_t vreinterpret_s16_f16(float16x4_t a) { return (int16x4_t)a; } static inline float16x4_t vreinterpret_f16_s16(int16x4_t a) { return (float16x4_t)a; } -static inline float16x8_t cv_vld1q_f16(const void* ptr) -{ -#ifndef vld1q_f16 // APPLE compiler defines vld1_f16 as macro - return vreinterpretq_f16_s16(vld1q_s16((const short*)ptr)); -#else - return vld1q_f16((const __fp16*)ptr); -#endif -} -static inline void cv_vst1q_f16(void* ptr, float16x8_t a) -{ -#ifndef vst1q_f16 // APPLE compiler defines vst1_f16 as macro - vst1q_s16((short*)ptr, vreinterpretq_s16_f16(a)); -#else - vst1q_f16((__fp16*)ptr, a); -#endif -} - static inline float16x4_t cv_vld1_f16(const void* ptr) { #ifndef vld1_f16 // APPLE compiler defines vld1_f16 as macro @@ -323,6 +313,45 @@ static inline void cv_vst1_f16(void* ptr, float16x4_t a) #define vdup_n_f16(v) (float16x4_t){v, v, v, v} #endif +#endif // CV_FP16 + +#if CV_FP16 +inline v_float32x4 v128_load_fp16_f32(const short* ptr) +{ + float16x4_t a = cv_vld1_f16((const __fp16*)ptr); + return v_float32x4(vcvt_f32_f16(a)); +} + +inline void v_store_fp16(short* ptr, const v_float32x4& a) +{ + float16x4_t fp16 = vcvt_f16_f32(a.val); + cv_vst1_f16((short*)ptr, fp16); +} +#endif + + +#if CV_SIMD128_FP16 +// Workaround for old compilers +static inline int16x8_t vreinterpretq_s16_f16(float16x8_t a) { return (int16x8_t)a; } +static inline float16x8_t vreinterpretq_f16_s16(int16x8_t a) { return (float16x8_t)a; } + +static inline float16x8_t cv_vld1q_f16(const void* ptr) +{ +#ifndef vld1q_f16 // APPLE compiler defines vld1_f16 as macro + return vreinterpretq_f16_s16(vld1q_s16((const short*)ptr)); +#else + return vld1q_f16((const __fp16*)ptr); +#endif +} +static inline void cv_vst1q_f16(void* ptr, float16x8_t a) +{ +#ifndef vst1q_f16 // APPLE compiler defines vst1_f16 as macro + vst1q_s16((short*)ptr, vreinterpretq_s16_f16(a)); +#else + vst1q_f16((__fp16*)ptr, a); +#endif +} + struct v_float16x8 { typedef short lane_type; @@ -344,7 +373,8 @@ struct v_float16x8 inline v_float16x8 v_setzero_f16() { return v_float16x8(vreinterpretq_f16_s16(vdupq_n_s16((short)0))); } inline v_float16x8 v_setall_f16(short v) { return v_float16x8(vreinterpretq_f16_s16(vdupq_n_s16(v))); } -#endif + +#endif // CV_SIMD128_FP16 #define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \ inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \ @@ -889,7 +919,7 @@ OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, 
f32) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64) #endif -#if CV_FP16 +#if CV_SIMD128_FP16 // Workaround for old comiplers inline v_float16x8 v_load_f16(const short* ptr) { return v_float16x8(cv_vld1q_f16(ptr)); } @@ -1462,7 +1492,7 @@ inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) } #endif -#if CV_FP16 +#if CV_SIMD128_FP16 inline v_float32x4 v_cvt_f32(const v_float16x8& a) { return v_float32x4(vcvt_f32_f16(vget_low_f16(a.val))); diff --git a/modules/core/include/opencv2/core/hal/intrin_sse.hpp b/modules/core/include/opencv2/core/hal/intrin_sse.hpp index e58486fb5d..42a39d07f9 100644 --- a/modules/core/include/opencv2/core/hal/intrin_sse.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_sse.hpp @@ -50,6 +50,7 @@ #define CV_SIMD128 1 #define CV_SIMD128_64F 1 +#define CV_SIMD128_FP16 0 // no native operations with FP16 type. namespace cv { @@ -272,28 +273,6 @@ struct v_float64x2 __m128d val; }; -struct v_float16x8 -{ - typedef short lane_type; - typedef __m128i vector_type; - enum { nlanes = 8 }; - - v_float16x8() : val(_mm_setzero_si128()) {} - explicit v_float16x8(__m128i v) : val(v) {} - v_float16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7) - { - val = _mm_setr_epi16(v0, v1, v2, v3, v4, v5, v6, v7); - } - short get0() const - { - return (short)_mm_cvtsi128_si32(val); - } - - __m128i val; -}; -inline v_float16x8 v_setzero_f16() { return v_float16x8(_mm_setzero_si128()); } -inline v_float16x8 v_setall_f16(short val) { return v_float16x8(_mm_set1_epi16(val)); } - namespace hal_sse_internal { template @@ -1330,21 +1309,6 @@ inline void v_store_high(_Tp* ptr, const _Tpvec& a) \ OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float32x4, float, ps) OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float64x2, double, pd) -inline v_float16x8 v_load_f16(const short* ptr) -{ return v_float16x8(_mm_loadu_si128((const __m128i*)ptr)); } -inline v_float16x8 v_load_f16_aligned(const short* ptr) -{ return v_float16x8(_mm_load_si128((const __m128i*)ptr)); } - -inline v_float16x8 v_load_f16_low(const short* ptr) -{ return v_float16x8(v_load_low(ptr).val); } -inline v_float16x8 v_load_f16_halves(const short* ptr0, const short* ptr1) -{ return v_float16x8(v_load_halves(ptr0, ptr1).val); } - -inline void v_store(short* ptr, const v_float16x8& a) -{ _mm_storeu_si128((__m128i*)ptr, a.val); } -inline void v_store_aligned(short* ptr, const v_float16x8& a) -{ _mm_store_si128((__m128i*)ptr, a.val); } - #define OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(_Tpvec, scalartype, func, suffix, sbit) \ inline scalartype v_reduce_##func(const v_##_Tpvec& a) \ { \ @@ -2622,19 +2586,15 @@ inline v_float64x2 v_cvt_f64_high(const v_float32x4& a) } #if CV_FP16 -inline v_float32x4 v_cvt_f32(const v_float16x8& a) +inline v_float32x4 v128_load_fp16_f32(const short* ptr) { - return v_float32x4(_mm_cvtph_ps(a.val)); + return v_float32x4(_mm_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr))); } -inline v_float32x4 v_cvt_f32_high(const v_float16x8& a) +inline void v_store_fp16(short* ptr, const v_float32x4& a) { - return v_float32x4(_mm_cvtph_ps(_mm_unpackhi_epi64(a.val, a.val))); -} - -inline v_float16x8 v_cvt_f16(const v_float32x4& a, const v_float32x4& b) -{ - return v_float16x8(_mm_unpacklo_epi64(_mm_cvtps_ph(a.val, 0), _mm_cvtps_ph(b.val, 0))); + __m128i fp16_value = _mm_cvtps_ph(a.val, 0); + _mm_storel_epi64((__m128i*)ptr, fp16_value); } #endif diff --git a/modules/core/test/test_intrin_utils.hpp b/modules/core/test/test_intrin_utils.hpp index cc9de4fc75..a1409f0979 100644 --- 
a/modules/core/test/test_intrin_utils.hpp +++ b/modules/core/test/test_intrin_utils.hpp @@ -1123,9 +1123,37 @@ template struct TheTest return *this; } +#if CV_FP16 + TheTest & test_loadstore_fp16_f32() + { + printf("test_loadstore_fp16_f32 ...\n"); + AlignedData data; data.a.clear(); + data.a.d[0] = 0x3c00; // 1.0 + data.a.d[R::nlanes - 1] = (unsigned short)0xc000; // -2.0 + AlignedData data_f32; data_f32.a.clear(); + AlignedData out; + + R r1 = vx_load_fp16_f32((short*)data.a.d); + R r2(r1); + EXPECT_EQ(1.0f, r1.get0()); + vx_store(data_f32.a.d, r2); + EXPECT_EQ(-2.0f, data_f32.a.d[R::nlanes - 1]); + + out.a.clear(); + vx_store_fp16((short*)out.a.d, r2); + for (int i = 0; i < R::nlanes; ++i) + { + EXPECT_EQ(data.a[i], out.a[i]) << "i=" << i; + } + + return *this; + } +#endif + +#if CV_SIMD_FP16 TheTest & test_loadstore_fp16() { -#if CV_FP16 && CV_SIMD + printf("test_loadstore_fp16 ...\n"); AlignedData data; AlignedData out; @@ -1149,12 +1177,10 @@ template struct TheTest EXPECT_EQ(data.a, out.a); return *this; -#endif } - TheTest & test_float_cvt_fp16() { -#if CV_FP16 && CV_SIMD + printf("test_float_cvt_fp16 ...\n"); AlignedData data; // check conversion @@ -1165,9 +1191,8 @@ template struct TheTest EXPECT_EQ(r3.get0(), r1.get0()); return *this; -#endif } - +#endif }; @@ -1448,11 +1473,14 @@ void test_hal_intrin_float64() void test_hal_intrin_float16() { DUMP_ENTRY(v_float16); -#if CV_SIMD_WIDTH > 16 +#if CV_FP16 + TheTest().test_loadstore_fp16_f32(); +#endif +#if CV_SIMD_FP16 TheTest() .test_loadstore_fp16() .test_float_cvt_fp16() - ; + ; #endif } #endif From 76f47548b3fc5b5fbfbeac923c88b4164ddd9e80 Mon Sep 17 00:00:00 2001 From: Colin Smith Date: Mon, 20 Aug 2018 14:10:54 -0700 Subject: [PATCH 3/8] Add export macro for ios conversion functions --- modules/imgcodecs/include/opencv2/imgcodecs/ios.h | 6 +++--- modules/imgcodecs/src/ios_conversions.mm | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/ios.h b/modules/imgcodecs/include/opencv2/imgcodecs/ios.h index fbd6371e58..a90c6d37a8 100644 --- a/modules/imgcodecs/include/opencv2/imgcodecs/ios.h +++ b/modules/imgcodecs/include/opencv2/imgcodecs/ios.h @@ -50,8 +50,8 @@ //! @addtogroup imgcodecs_ios //! @{ -UIImage* MatToUIImage(const cv::Mat& image); -void UIImageToMat(const UIImage* image, - cv::Mat& m, bool alphaExist = false); +CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image); +CV_EXPORTS void UIImageToMat(const UIImage* image, + cv::Mat& m, bool alphaExist = false); //! @} diff --git a/modules/imgcodecs/src/ios_conversions.mm b/modules/imgcodecs/src/ios_conversions.mm index 202cfe30d1..e3363c28ae 100644 --- a/modules/imgcodecs/src/ios_conversions.mm +++ b/modules/imgcodecs/src/ios_conversions.mm @@ -47,8 +47,8 @@ #include "opencv2/core.hpp" #include "precomp.hpp" -UIImage* MatToUIImage(const cv::Mat& image); -void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist); +CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image); +CV_EXPORTS void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist); UIImage* MatToUIImage(const cv::Mat& image) { From a527e8cc7324bc3e62dfa50ea30d33b2afc01022 Mon Sep 17 00:00:00 2001 From: Kaartic Sivaraam Date: Mon, 20 Aug 2018 22:11:41 +0530 Subject: [PATCH 4/8] cap-v4l: remove unwanted loop in V4L2 mainloop The while loop would run only once making it useless and leading to confusion. So, remove the unwanted while loop and just keep an infinite for loop. 
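In other words, the old control flow reduced to the following condensed sketch (illustrative only; the real code is in the diff below):

    unsigned int count = 1;
    while (count-- > 0) {      /* executes its body exactly once */
        for (;;) { /* select() with timeout + read_frame_v4l2() */ }
    }

After the change only the inner loop remains, with the select() timeout handling and the -1/0/1 return codes untouched:

    for (;;) { /* select() with timeout + read_frame_v4l2() */ }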
--- modules/videoio/src/cap_v4l.cpp | 60 +++++++++++++++------------------ 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/modules/videoio/src/cap_v4l.cpp b/modules/videoio/src/cap_v4l.cpp index e5c37281aa..0416231f65 100644 --- a/modules/videoio/src/cap_v4l.cpp +++ b/modules/videoio/src/cap_v4l.cpp @@ -857,45 +857,39 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) { } static int mainloop_v4l2(CvCaptureCAM_V4L* capture) { - unsigned int count; + for (;;) { + fd_set fds; + struct timeval tv; + int r; - count = 1; + FD_ZERO (&fds); + FD_SET (capture->deviceHandle, &fds); - while (count-- > 0) { - for (;;) { - fd_set fds; - struct timeval tv; - int r; + /* Timeout. */ + tv.tv_sec = 10; + tv.tv_usec = 0; - FD_ZERO (&fds); - FD_SET (capture->deviceHandle, &fds); + r = select (capture->deviceHandle+1, &fds, NULL, NULL, &tv); - /* Timeout. */ - tv.tv_sec = 10; - tv.tv_usec = 0; + if (-1 == r) { + if (EINTR == errno) + continue; - r = select (capture->deviceHandle+1, &fds, NULL, NULL, &tv); - - if (-1 == r) { - if (EINTR == errno) - continue; - - perror ("select"); - } - - if (0 == r) { - fprintf (stderr, "select timeout\n"); - - /* end the infinite loop */ - break; - } - - int returnCode = read_frame_v4l2 (capture); - if(returnCode == -1) - return -1; - if(returnCode == 1) - return 1; + perror ("select"); } + + if (0 == r) { + fprintf (stderr, "select timeout\n"); + + /* end the infinite loop */ + break; + } + + int returnCode = read_frame_v4l2 (capture); + if(returnCode == -1) + return -1; + if(returnCode == 1) + return 1; } return 0; } From 808c89adc10ee14e3a9737670a76c79ead564ef6 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Tue, 14 Aug 2018 15:39:32 +0300 Subject: [PATCH 5/8] Fixed windows build with InferenceEngine --- modules/dnn/src/layers/normalize_bbox_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp index 70d9b7385a..fbb29292c2 100644 --- a/modules/dnn/src/layers/normalize_bbox_layer.cpp +++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp @@ -276,7 +276,7 @@ public: { auto weights = InferenceEngine::make_shared_blob(InferenceEngine::Precision::FP32, InferenceEngine::Layout::C, - {numChannels}); + {(size_t)numChannels}); weights->allocate(); std::vector ones(numChannels, 1); weights->set(ones); @@ -286,7 +286,7 @@ public: else { CV_Assert(numChannels == blobs[0].total()); - ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C); + ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C); ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? 
"1" : "0"; } ieLayer->params["eps"] = format("%f", epsilon); From f25450791b1adf93b2eeda83aa40f5281497016a Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 21 Aug 2018 16:11:24 +0300 Subject: [PATCH 6/8] dnn(test): mark unstable OpenCL tests --- modules/dnn/test/test_halide_layers.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp index 788f237b22..11d059223c 100644 --- a/modules/dnn/test/test_halide_layers.cpp +++ b/modules/dnn/test/test_halide_layers.cpp @@ -113,7 +113,11 @@ TEST_P(Convolution, Accuracy) bool skipCheck = false; if (cvtest::skipUnstableTests && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16) && - kernel == Size(3, 1) && stride == Size(1, 1) && pad == Size(0, 1)) + ( + (kernel == Size(3, 1) && stride == Size(1, 1) && pad == Size(0, 1)) || + (stride.area() > 1 && !(pad.width == 0 && pad.height == 0)) + ) + ) skipCheck = true; int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width}; From 2c42361ecdcf3535aeb59277f90c211bb2bb937d Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 22 Aug 2018 14:03:12 +0300 Subject: [PATCH 7/8] build: fix build with defined CV_STATIC_ANALYSIS --- modules/core/include/opencv2/core/base.hpp | 7 ++++++- modules/core/include/opencv2/core/cvdef.h | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/modules/core/include/opencv2/core/base.hpp b/modules/core/include/opencv2/core/base.hpp index 0342ebde52..389fa65705 100644 --- a/modules/core/include/opencv2/core/base.hpp +++ b/modules/core/include/opencv2/core/base.hpp @@ -414,7 +414,7 @@ CV_INLINE CV_NORETURN void errorNoReturn(int _code, const String& _err, const ch // We need to use simplified definition for them. #define CV_Error(...) do { abort(); } while (0) #define CV_Error_( code, args ) do { cv::format args; abort(); } while (0) -#define CV_Assert_1( expr ) do { if (!(expr)) abort(); } while (0) +#define CV_Assert( expr ) do { if (!(expr)) abort(); } while (0) #else // CV_STATIC_ANALYSIS @@ -473,6 +473,11 @@ configurations while CV_DbgAssert is only retained in the Debug configuration. //! @cond IGNORED +#if defined OPENCV_FORCE_MULTIARG_ASSERT_CHECK && defined CV_STATIC_ANALYSIS +#warning "OPENCV_FORCE_MULTIARG_ASSERT_CHECK can't be used with CV_STATIC_ANALYSIS" +#undef OPENCV_FORCE_MULTIARG_ASSERT_CHECK +#endif + #ifdef OPENCV_FORCE_MULTIARG_ASSERT_CHECK #define CV_Assert_1( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0) #else diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index c0f76d1654..56403b3191 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -349,7 +349,13 @@ Cv64suf; // We need to use simplified definition for them. 
From 096366738b44acabc4479b32793fdf0caaaf577d Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Wed, 22 Aug 2018 16:04:40 +0300
Subject: [PATCH 8/8] dnn(build): fix CV_Assert() usage

---
 modules/dnn/src/dnn.cpp                    | 9 +++++----
 modules/dnn/src/layers/prior_box_layer.cpp | 6 +++---
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index d8815a5f08..ad4741d008 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -588,7 +588,8 @@ struct DataLayer : public Layer
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));

-        CV_Assert(inputsData.size() == 1, inputsData[0].dims == 4);
+        CV_CheckEQ(inputsData.size(), (size_t)1, "");
+        CV_CheckEQ(inputsData[0].dims, 4, "");
         const size_t numChannels = inputsData[0].size[1];
         CV_Assert(numChannels <= 4);
@@ -1302,7 +1303,7 @@ struct Net::Impl
             if (!node.empty())
             {
                 Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-                CV_Assert(!ieNode.empty(), !ieNode->net.empty());
+                CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
                 layerNet = ieNode->net;
             }
         }
@@ -1316,7 +1317,7 @@ struct Net::Impl
             if (!inpNode.empty())
             {
                 Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
-                CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
+                CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
                 if (layerNet != ieInpNode->net)
                 {
                     // layerNet is empty or nodes are from different graphs.
@@ -1425,7 +1426,7 @@ struct Net::Impl
             if (!inpNode.empty())
             {
                 Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
-                CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
+                CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
                 if (ieInpNode->net != net)
                 {
                     net = Ptr<InfEngineBackendNet>();

diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp
index c1690f996f..6be6efa8a6 100644
--- a/modules/dnn/src/layers/prior_box_layer.cpp
+++ b/modules/dnn/src/layers/prior_box_layer.cpp
@@ -487,8 +487,8 @@ public:

         if (_explicitSizes)
         {
-            CV_Assert(!_boxWidths.empty(), !_boxHeights.empty(),
-                      _boxWidths.size() == _boxHeights.size());
+            CV_Assert(!_boxWidths.empty()); CV_Assert(!_boxHeights.empty());
+            CV_Assert(_boxWidths.size() == _boxHeights.size());
             ieLayer->params["width"] = format("%f", _boxWidths[0]);
             ieLayer->params["height"] = format("%f", _boxHeights[0]);
             for (int i = 1; i < _boxWidths.size(); ++i)
@@ -530,7 +530,7 @@ public:
             ieLayer->params["step_h"] = format("%f", _stepY);
             ieLayer->params["step_w"] = format("%f", _stepX);
         }
-        CV_Assert(_offsetsX.size() == 1, _offsetsY.size() == 1, _offsetsX[0] == _offsetsY[0]);
+        CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
         ieLayer->params["offset"] = format("%f", _offsetsX[0]);

         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
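Side note (illustrative only, not part of the patch series): the style the
last patch converges on is one condition per assertion macro, so that a
failure message quotes exactly the expression that failed, with CV_CheckEQ
preferred where printing both operand values helps diagnosis. A minimal
usage sketch against the public API; checkBlob and its message text are
hypothetical:

    #include <opencv2/core.hpp>
    #include <opencv2/core/check.hpp>

    static void checkBlob(const cv::Mat& blob)
    {
        // one expression per CV_Assert: the exception text quotes it verbatim
        CV_Assert(!blob.empty());
        CV_Assert(blob.dims == 4);
        // CV_CheckEQ additionally reports the runtime values of both operands
        CV_CheckEQ(blob.size[1], 3, "expected a 3-channel blob");
    }

    int main()
    {
        const int sz[] = {1, 3, 8, 8};
        cv::Mat blob(4, sz, CV_32F, cv::Scalar::all(0));
        checkBlob(blob);   // passes: 1x3x8x8, 4 dims, 3 channels
        return 0;
    }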