Merge remote-tracking branch 'upstream/3.4' into merge-3.4

Alexander Alekhin 2018-08-22 17:38:51 +03:00
commit 6356403964
36 changed files with 316 additions and 277 deletions

View File

@ -389,7 +389,7 @@ CV_EXPORTS CV_NORETURN void error(int _code, const String& _err, const char* _fu
// We need to use simplified definition for them.
#define CV_Error(...) do { abort(); } while (0)
#define CV_Error_( code, args ) do { cv::format args; abort(); } while (0)
#define CV_Assert_1( expr ) do { if (!(expr)) abort(); } while (0)
#define CV_Assert( expr ) do { if (!(expr)) abort(); } while (0)
#else // CV_STATIC_ANALYSIS
@ -419,7 +419,13 @@ for example:
*/
#define CV_Error_( code, args ) cv::error( code, cv::format args, CV_Func, __FILE__, __LINE__ )
#define CV_Assert_1( expr ) if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ )
/** @brief Checks a condition at runtime and throws exception if it fails
The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros
raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release
configurations while CV_DbgAssert is only retained in the Debug configuration.
*/
#define CV_Assert( expr ) do { if(!!(expr)) ; else cv::error( cv::Error::StsAssert, #expr, CV_Func, __FILE__, __LINE__ ); } while(0)
#endif // CV_STATIC_ANALYSIS
@ -432,8 +438,8 @@ for example:
#define CV_ErrorNoReturn_ CV_Error_
#endif
#endif
//! @endcond
#define CV_Assert_1 CV_Assert
#define CV_Assert_2( expr1, expr2 ) CV_Assert_1(expr1); CV_Assert_1(expr2)
#define CV_Assert_3( expr1, expr2, expr3 ) CV_Assert_2(expr1, expr2); CV_Assert_1(expr3)
#define CV_Assert_4( expr1, expr2, expr3, expr4 ) CV_Assert_3(expr1, expr2, expr3); CV_Assert_1(expr4)
@ -444,21 +450,14 @@ for example:
#define CV_Assert_9( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ) CV_Assert_8(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8 ); CV_Assert_1(expr9)
#define CV_Assert_10( expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9, expr10 ) CV_Assert_9(expr1, expr2, expr3, expr4, expr5, expr6, expr7, expr8, expr9 ); CV_Assert_1(expr10)
#define CV_VA_NUM_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
#define CV_VA_NUM_ARGS(...) CV_VA_NUM_ARGS_HELPER(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define CV_Assert_N(...) do { __CV_CAT(CV_Assert_, __CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__); } while(0)
/** @brief Checks a condition at runtime and throws exception if it fails
//! @endcond
The macros CV_Assert (and CV_DbgAssert(expr)) evaluate the specified expression. If it is 0, the macros
raise an error (see cv::error). The macro CV_Assert checks the condition in both Debug and Release
configurations while CV_DbgAssert is only retained in the Debug configuration.
*/
#define CV_Assert(...) do { CVAUX_CONCAT(CV_Assert_, CV_VA_NUM_ARGS(__VA_ARGS__)) (__VA_ARGS__); } while(0)
/** replaced with CV_Assert(expr) in Debug configuration */
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
# define CV_DbgAssert(expr) CV_Assert(expr)
#else
/** replaced with CV_Assert(expr) in Debug configuration */
# define CV_DbgAssert(expr)
#endif
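
After this hunk, CV_Assert is strictly single-expression again and comma-separated condition lists go through CV_Assert_N. A minimal usage sketch of the three forms (checkInputs is a hypothetical caller, not part of the patch):

#include <opencv2/core.hpp>

void checkInputs(const cv::Mat& a, const cv::Mat& b)
{
    // Single expression, checked in both Debug and Release builds.
    CV_Assert(a.type() == b.type());

    // N expressions: __CV_VA_NUM_ARGS counts them and dispatches to
    // CV_Assert_2, so each condition is evaluated and reported separately.
    CV_Assert_N(a.size() == b.size(), a.isContinuous());

    // Compiled out entirely in Release builds.
    CV_DbgAssert(!a.empty());
}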

View File

@ -79,6 +79,8 @@ namespace cv { namespace debug_build_guard { } using namespace debug_build_guard
#define __CV_CAT(x, y) __CV_CAT_(x, y)
#endif
#define __CV_VA_NUM_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
#define __CV_VA_NUM_ARGS(...) __CV_VA_NUM_ARGS_HELPER(__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
// undef problematic defines sometimes defined by system headers (windows.h in particular)
#undef small
@ -347,7 +349,13 @@ Cv64suf;
// We need to use simplified definition for them.
#ifndef CV_STATIC_ANALYSIS
# if defined(__KLOCWORK__) || defined(__clang_analyzer__) || defined(__COVERITY__)
# define CV_STATIC_ANALYSIS
# define CV_STATIC_ANALYSIS 1
# endif
#else
# if defined(CV_STATIC_ANALYSIS) && !(__CV_CAT(1, CV_STATIC_ANALYSIS) == 1) // defined and not empty
# if 0 == CV_STATIC_ANALYSIS
# undef CV_STATIC_ANALYSIS
# endif
# endif
#endif
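
The __CV_VA_NUM_ARGS helper added above is the classic argument-counting trick: the trailing 10..0 list shifts right by one slot per argument, so the actual count lands in parameter N. A standalone re-derivation (macro names here are local to this sketch, not OpenCV's):

#include <cstdio>

#define VA_NUM_ARGS_HELPER(_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,N,...) N
#define VA_NUM_ARGS(...) VA_NUM_ARGS_HELPER(__VA_ARGS__,10,9,8,7,6,5,4,3,2,1,0)

int main()
{
    // Expands entirely in the preprocessor: VA_NUM_ARGS(a, b, c)
    //   -> VA_NUM_ARGS_HELPER(a, b, c, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
    //   -> 3 (the 11th argument)
    std::printf("%d\n", VA_NUM_ARGS(a, b, c));
    return 0;
}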

View File

@ -204,6 +204,18 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
#define CV_SIMD512_64F 0
#endif
#ifndef CV_SIMD128_FP16
#define CV_SIMD128_FP16 0
#endif
#ifndef CV_SIMD256_FP16
#define CV_SIMD256_FP16 0
#endif
#ifndef CV_SIMD512_FP16
#define CV_SIMD512_FP16 0
#endif
//==================================================================================================
#define CV_INTRIN_DEFINE_WIDE_INTRIN(typ, vtyp, short_typ, prefix, loadsfx) \
@ -274,8 +286,8 @@ template<typename _Tp> struct V_RegTraits
#if CV_SIMD128_64F
CV_DEF_REG_TRAITS(v, v_float64x2, double, f64, v_float64x2, void, void, v_int64x2, v_int32x4);
#endif
#if CV_FP16
CV_DEF_REG_TRAITS(v, v_float16x8, short, f16, v_float32x4, void, void, v_int16x8, v_int16x8);
#if CV_SIMD128_FP16
CV_DEF_REG_TRAITS(v, v_float16x8, short, f16, v_float16x8, void, void, v_int16x8, v_int16x8);
#endif
#endif
@ -290,8 +302,8 @@ template<typename _Tp> struct V_RegTraits
CV_DEF_REG_TRAITS(v256, v_uint64x4, uint64, u64, v_uint64x4, void, void, v_int64x4, void);
CV_DEF_REG_TRAITS(v256, v_int64x4, int64, s64, v_uint64x4, void, void, v_int64x4, void);
CV_DEF_REG_TRAITS(v256, v_float64x4, double, f64, v_float64x4, void, void, v_int64x4, v_int32x8);
#if CV_FP16
CV_DEF_REG_TRAITS(v256, v_float16x16, short, f16, v_float32x8, void, void, v_int16x16, void);
#if CV_SIMD256_FP16
CV_DEF_REG_TRAITS(v256, v_float16x16, short, f16, v_float16x16, void, void, v_int16x16, void);
#endif
#endif
@ -309,6 +321,7 @@ using namespace CV__SIMD_NAMESPACE;
namespace CV__SIMD_NAMESPACE {
#define CV_SIMD 1
#define CV_SIMD_64F CV_SIMD256_64F
#define CV_SIMD_FP16 CV_SIMD256_FP16
#define CV_SIMD_WIDTH 32
typedef v_uint8x32 v_uint8;
typedef v_int8x32 v_int8;
@ -323,6 +336,10 @@ namespace CV__SIMD_NAMESPACE {
typedef v_float64x4 v_float64;
#endif
#if CV_FP16
#define vx_load_fp16_f32 v256_load_fp16_f32
#define vx_store_fp16 v_store_fp16
#endif
#if CV_SIMD256_FP16
typedef v_float16x16 v_float16;
CV_INTRIN_DEFINE_WIDE_INTRIN(short, v_float16, f16, v256, load_f16)
#endif
@ -336,6 +353,7 @@ using namespace CV__SIMD_NAMESPACE;
namespace CV__SIMD_NAMESPACE {
#define CV_SIMD CV_SIMD128
#define CV_SIMD_64F CV_SIMD128_64F
#define CV_SIMD_FP16 CV_SIMD128_FP16
#define CV_SIMD_WIDTH 16
typedef v_uint8x16 v_uint8;
typedef v_int8x16 v_int8;
@ -350,6 +368,10 @@ namespace CV__SIMD_NAMESPACE {
typedef v_float64x2 v_float64;
#endif
#if CV_FP16
#define vx_load_fp16_f32 v128_load_fp16_f32
#define vx_store_fp16 v_store_fp16
#endif
#if CV_SIMD128_FP16
typedef v_float16x8 v_float16;
CV_INTRIN_DEFINE_WIDE_INTRIN(short, v_float16, f16, v, load_f16)
#endif
@ -393,6 +415,11 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
#define CV_SIMD_64F 0
#endif
#ifndef CV_SIMD_FP16
#define CV_SIMD_FP16 0 //!< Defined to 1 when native operations with float16x8_t / float16x16_t (SIMD256) types are supported
#endif
#ifndef CV_SIMD
#define CV_SIMD 0
#endif
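
With CV_SIMD_FP16 and the vx_load_fp16_f32 / vx_store_fp16 aliases in place, FP16 buffers can be processed through float32 lanes without caring about the active vector width. A sketch assuming a build where both CV_SIMD and CV_FP16 are 1 (scale_fp16 and the divisibility assumption are ours):

#include <opencv2/core/hal/intrin.hpp>

#if CV_SIMD && CV_FP16
// Scale an FP16 buffer by alpha, widening through float32 lanes.
// For brevity, n is assumed to be a multiple of v_float32::nlanes.
static void scale_fp16(short* dst, const short* src, int n, float alpha)
{
    using namespace cv;
    v_float32 valpha = vx_setall_f32(alpha);
    for (int i = 0; i < n; i += v_float32::nlanes)
    {
        v_float32 v = vx_load_fp16_f32(src + i); // fp16 -> f32 lanes
        vx_store_fp16(dst + i, v * valpha);      // f32 lanes -> fp16
    }
}
#endif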

View File

@ -7,6 +7,7 @@
#define CV_SIMD256 1
#define CV_SIMD256_64F 1
#define CV_SIMD256_FP16 0 // no native operations with FP16 type. Only load/store from float32x8 are available (if CV_FP16 == 1)
namespace cv
{
@ -262,26 +263,6 @@ struct v_float64x4
double get0() const { return _mm_cvtsd_f64(_mm256_castpd256_pd128(val)); }
};
struct v_float16x16
{
typedef short lane_type;
enum { nlanes = 16 };
__m256i val;
explicit v_float16x16(__m256i v) : val(v) {}
v_float16x16(short v0, short v1, short v2, short v3,
short v4, short v5, short v6, short v7,
short v8, short v9, short v10, short v11,
short v12, short v13, short v14, short v15)
{
val = _mm256_setr_epi16(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15);
}
v_float16x16() : val(_mm256_setzero_si256()) {}
short get0() const { return (short)_v_cvtsi256_si32(val); }
};
inline v_float16x16 v256_setzero_f16() { return v_float16x16(_mm256_setzero_si256()); }
inline v_float16x16 v256_setall_f16(short val) { return v_float16x16(_mm256_set1_epi16(val)); }
//////////////// Load and store operations ///////////////
#define OPENCV_HAL_IMPL_AVX_LOADSTORE(_Tpvec, _Tp) \
@ -424,20 +405,18 @@ inline v_float64x4 v_reinterpret_as_f64(const v_float64x4& a)
inline v_float64x4 v_reinterpret_as_f64(const v_float32x8& a)
{ return v_float64x4(_mm256_castps_pd(a.val)); }
inline v_float16x16 v256_load_f16(const short* ptr)
{ return v_float16x16(_mm256_loadu_si256((const __m256i*)ptr)); }
inline v_float16x16 v256_load_f16_aligned(const short* ptr)
{ return v_float16x16(_mm256_load_si256((const __m256i*)ptr)); }
#if CV_FP16
inline v_float32x8 v256_load_fp16_f32(const short* ptr)
{
return v_float32x8(_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr)));
}
inline v_float16x16 v256_load_f16_low(const short* ptr)
{ return v_float16x16(v256_load_low(ptr).val); }
inline v_float16x16 v256_load_f16_halves(const short* ptr0, const short* ptr1)
{ return v_float16x16(v256_load_halves(ptr0, ptr1).val); }
inline void v_store(short* ptr, const v_float16x16& a)
{ _mm256_storeu_si256((__m256i*)ptr, a.val); }
inline void v_store_aligned(short* ptr, const v_float16x16& a)
{ _mm256_store_si256((__m256i*)ptr, a.val); }
inline void v_store_fp16(short* ptr, const v_float32x8& a)
{
__m128i fp16_value = _mm256_cvtps_ph(a.val, 0);
_mm_store_si128((__m128i*)ptr, fp16_value);
}
#endif
/* Recombine */
/*#define OPENCV_HAL_IMPL_AVX_COMBINE(_Tpvec, perm) \
@ -1262,20 +1241,6 @@ inline v_float64x4 v_cvt_f64(const v_float32x8& a)
inline v_float64x4 v_cvt_f64_high(const v_float32x8& a)
{ return v_float64x4(_mm256_cvtps_pd(_v256_extract_high(a.val))); }
#if CV_FP16
inline v_float32x8 v_cvt_f32(const v_float16x16& a)
{ return v_float32x8(_mm256_cvtph_ps(_v256_extract_low(a.val))); }
inline v_float32x8 v_cvt_f32_high(const v_float16x16& a)
{ return v_float32x8(_mm256_cvtph_ps(_v256_extract_high(a.val))); }
inline v_float16x16 v_cvt_f16(const v_float32x8& a, const v_float32x8& b)
{
__m128i ah = _mm256_cvtps_ph(a.val, 0), bh = _mm256_cvtps_ph(b.val, 0);
return v_float16x16(_mm256_inserti128_si256(_mm256_castsi128_si256(ah), bh, 1));
}
#endif
////////////// Lookup table access ////////////////////
inline v_int32x8 v_lut(const int* tab, const v_int32x8& idxvec)
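
On x86, v256_load_fp16_f32 and v_store_fp16 reduce to the F16C conversion intrinsics. For reference, the same round trip written against raw AVX (compile with -mavx -mf16c; roundtrip8 is illustrative only):

#include <immintrin.h>

static void roundtrip8(unsigned short* dst, const unsigned short* src)
{
    __m128i h  = _mm_loadu_si128((const __m128i*)src); // 8 x fp16 bit patterns
    __m256  f  = _mm256_cvtph_ps(h);                   // widen to 8 x f32
    __m128i h2 = _mm256_cvtps_ph(f, 0);                // narrow back, round-to-nearest
    _mm_storeu_si128((__m128i*)dst, h2);
}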

View File

@ -62,6 +62,15 @@ CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
#define CV_SIMD128_64F 0
#endif
#ifndef CV_SIMD128_FP16
# if CV_FP16 && (defined(__GNUC__) && __GNUC__ >= 5) // #12027: float16x8_t is missing in GCC 4.8.2
# define CV_SIMD128_FP16 1
# endif
#endif
#ifndef CV_SIMD128_FP16
# define CV_SIMD128_FP16 0
#endif
#if CV_SIMD128_64F
#define OPENCV_HAL_IMPL_NEON_REINTERPRET(_Tpv, suffix) \
template <typename T> static inline \
@ -280,28 +289,9 @@ struct v_float64x2
#if CV_FP16
// Workaround for old compilers
static inline int16x8_t vreinterpretq_s16_f16(float16x8_t a) { return (int16x8_t)a; }
static inline float16x8_t vreinterpretq_f16_s16(int16x8_t a) { return (float16x8_t)a; }
static inline int16x4_t vreinterpret_s16_f16(float16x4_t a) { return (int16x4_t)a; }
static inline float16x4_t vreinterpret_f16_s16(int16x4_t a) { return (float16x4_t)a; }
static inline float16x8_t cv_vld1q_f16(const void* ptr)
{
#ifndef vld1q_f16 // APPLE compiler defines vld1q_f16 as macro
return vreinterpretq_f16_s16(vld1q_s16((const short*)ptr));
#else
return vld1q_f16((const __fp16*)ptr);
#endif
}
static inline void cv_vst1q_f16(void* ptr, float16x8_t a)
{
#ifndef vst1q_f16 // APPLE compiler defines vst1q_f16 as macro
vst1q_s16((short*)ptr, vreinterpretq_s16_f16(a));
#else
vst1q_f16((__fp16*)ptr, a);
#endif
}
static inline float16x4_t cv_vld1_f16(const void* ptr)
{
#ifndef vld1_f16 // APPLE compiler defines vld1_f16 as macro
@ -323,6 +313,45 @@ static inline void cv_vst1_f16(void* ptr, float16x4_t a)
#define vdup_n_f16(v) (float16x4_t){v, v, v, v}
#endif
#endif // CV_FP16
#if CV_FP16
inline v_float32x4 v128_load_fp16_f32(const short* ptr)
{
float16x4_t a = cv_vld1_f16((const __fp16*)ptr);
return v_float32x4(vcvt_f32_f16(a));
}
inline void v_store_fp16(short* ptr, const v_float32x4& a)
{
float16x4_t fp16 = vcvt_f16_f32(a.val);
cv_vst1_f16((short*)ptr, fp16);
}
#endif
#if CV_SIMD128_FP16
// Workaround for old compilers
static inline int16x8_t vreinterpretq_s16_f16(float16x8_t a) { return (int16x8_t)a; }
static inline float16x8_t vreinterpretq_f16_s16(int16x8_t a) { return (float16x8_t)a; }
static inline float16x8_t cv_vld1q_f16(const void* ptr)
{
#ifndef vld1q_f16 // APPLE compiler defines vld1q_f16 as macro
return vreinterpretq_f16_s16(vld1q_s16((const short*)ptr));
#else
return vld1q_f16((const __fp16*)ptr);
#endif
}
static inline void cv_vst1q_f16(void* ptr, float16x8_t a)
{
#ifndef vst1q_f16 // APPLE compiler defines vst1q_f16 as macro
vst1q_s16((short*)ptr, vreinterpretq_s16_f16(a));
#else
vst1q_f16((__fp16*)ptr, a);
#endif
}
struct v_float16x8
{
typedef short lane_type;
@ -344,7 +373,8 @@ struct v_float16x8
inline v_float16x8 v_setzero_f16() { return v_float16x8(vreinterpretq_f16_s16(vdupq_n_s16((short)0))); }
inline v_float16x8 v_setall_f16(short v) { return v_float16x8(vreinterpretq_f16_s16(vdupq_n_s16(v))); }
#endif
#endif // CV_SIMD128_FP16
#define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \
inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \
@ -889,7 +919,7 @@ OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32)
OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64)
#endif
#if CV_FP16
#if CV_SIMD128_FP16
// Workaround for old compilers
inline v_float16x8 v_load_f16(const short* ptr)
{ return v_float16x8(cv_vld1q_f16(ptr)); }
@ -1462,7 +1492,7 @@ inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
}
#endif
#if CV_FP16
#if CV_SIMD128_FP16
inline v_float32x4 v_cvt_f32(const v_float16x8& a)
{
return v_float32x4(vcvt_f32_f16(vget_low_f16(a.val)));
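
On ARM the conversions need only the 64-bit half registers, via vcvt_f32_f16 / vcvt_f16_f32. A standalone equivalent of the v128_load_fp16_f32 / v_store_fp16 pair, assuming a toolchain where __fp16 and vld1_f16/vst1_f16 are available (older GCC needs the cv_vld1_f16 workaround shown above):

#include <arm_neon.h>

static void roundtrip4(__fp16* dst, const __fp16* src)
{
    float16x4_t h = vld1_f16(src);       // 4 x fp16
    float32x4_t f = vcvt_f32_f16(h);     // widen to 4 x f32
    vst1_f16(dst, vcvt_f16_f32(f));      // narrow back to fp16
}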

View File

@ -50,6 +50,7 @@
#define CV_SIMD128 1
#define CV_SIMD128_64F 1
#define CV_SIMD128_FP16 0 // no native operations with FP16 type.
namespace cv
{
@ -272,28 +273,6 @@ struct v_float64x2
__m128d val;
};
struct v_float16x8
{
typedef short lane_type;
typedef __m128i vector_type;
enum { nlanes = 8 };
v_float16x8() : val(_mm_setzero_si128()) {}
explicit v_float16x8(__m128i v) : val(v) {}
v_float16x8(short v0, short v1, short v2, short v3, short v4, short v5, short v6, short v7)
{
val = _mm_setr_epi16(v0, v1, v2, v3, v4, v5, v6, v7);
}
short get0() const
{
return (short)_mm_cvtsi128_si32(val);
}
__m128i val;
};
inline v_float16x8 v_setzero_f16() { return v_float16x8(_mm_setzero_si128()); }
inline v_float16x8 v_setall_f16(short val) { return v_float16x8(_mm_set1_epi16(val)); }
namespace hal_sse_internal
{
template <typename to_sse_type, typename from_sse_type>
@ -1330,21 +1309,6 @@ inline void v_store_high(_Tp* ptr, const _Tpvec& a) \
OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float32x4, float, ps)
OPENCV_HAL_IMPL_SSE_LOADSTORE_FLT_OP(v_float64x2, double, pd)
inline v_float16x8 v_load_f16(const short* ptr)
{ return v_float16x8(_mm_loadu_si128((const __m128i*)ptr)); }
inline v_float16x8 v_load_f16_aligned(const short* ptr)
{ return v_float16x8(_mm_load_si128((const __m128i*)ptr)); }
inline v_float16x8 v_load_f16_low(const short* ptr)
{ return v_float16x8(v_load_low(ptr).val); }
inline v_float16x8 v_load_f16_halves(const short* ptr0, const short* ptr1)
{ return v_float16x8(v_load_halves(ptr0, ptr1).val); }
inline void v_store(short* ptr, const v_float16x8& a)
{ _mm_storeu_si128((__m128i*)ptr, a.val); }
inline void v_store_aligned(short* ptr, const v_float16x8& a)
{ _mm_store_si128((__m128i*)ptr, a.val); }
#define OPENCV_HAL_IMPL_SSE_REDUCE_OP_8(_Tpvec, scalartype, func, suffix, sbit) \
inline scalartype v_reduce_##func(const v_##_Tpvec& a) \
{ \
@ -2622,19 +2586,15 @@ inline v_float64x2 v_cvt_f64_high(const v_float32x4& a)
}
#if CV_FP16
inline v_float32x4 v_cvt_f32(const v_float16x8& a)
inline v_float32x4 v128_load_fp16_f32(const short* ptr)
{
return v_float32x4(_mm_cvtph_ps(a.val));
return v_float32x4(_mm_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr)));
}
inline v_float32x4 v_cvt_f32_high(const v_float16x8& a)
inline void v_store_fp16(short* ptr, const v_float32x4& a)
{
return v_float32x4(_mm_cvtph_ps(_mm_unpackhi_epi64(a.val, a.val)));
}
inline v_float16x8 v_cvt_f16(const v_float32x4& a, const v_float32x4& b)
{
return v_float16x8(_mm_unpacklo_epi64(_mm_cvtps_ph(a.val, 0), _mm_cvtps_ph(b.val, 0)));
__m128i fp16_value = _mm_cvtps_ph(a.val, 0);
_mm_storel_epi64((__m128i*)ptr, fp16_value);
}
#endif

View File

@ -796,7 +796,7 @@ static bool ocl_gemm( InputArray matA, InputArray matB, double alpha,
int depth = matA.depth(), cn = matA.channels();
int type = CV_MAKETYPE(depth, cn);
CV_Assert( type == matB.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );
CV_Assert_N( type == matB.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );
const ocl::Device & dev = ocl::Device::getDefault();
bool doubleSupport = dev.doubleFPConfig() > 0;
@ -1555,7 +1555,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
Size a_size = A.size(), d_size;
int len = 0, type = A.type();
CV_Assert( type == B.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );
CV_Assert_N( type == B.type(), (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );
switch( flags & (GEMM_1_T|GEMM_2_T) )
{
@ -1583,7 +1583,7 @@ void cv::gemm( InputArray matA, InputArray matB, double alpha,
if( !C.empty() )
{
CV_Assert( C.type() == type,
CV_Assert_N( C.type() == type,
(((flags&GEMM_3_T) == 0 && C.rows == d_size.height && C.cols == d_size.width) ||
((flags&GEMM_3_T) != 0 && C.rows == d_size.width && C.cols == d_size.height)));
}
@ -2457,7 +2457,7 @@ void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean,
{
CV_INSTRUMENT_REGION()
CV_Assert( data, nsamples > 0 );
CV_Assert_N( data, nsamples > 0 );
Size size = data[0].size();
int sz = size.width * size.height, esz = (int)data[0].elemSize();
int type = data[0].type();
@ -2480,7 +2480,7 @@ void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean,
for( int i = 0; i < nsamples; i++ )
{
CV_Assert( data[i].size() == size, data[i].type() == type );
CV_Assert_N( data[i].size() == size, data[i].type() == type );
if( data[i].isContinuous() )
memcpy( _data.ptr(i), data[i].ptr(), sz*esz );
else
@ -2516,7 +2516,7 @@ void cv::calcCovarMatrix( InputArray _src, OutputArray _covar, InputOutputArray
int i = 0;
for(std::vector<cv::Mat>::iterator each = src.begin(); each != src.end(); ++each, ++i )
{
CV_Assert( (*each).size() == size, (*each).type() == type );
CV_Assert_N( (*each).size() == size, (*each).type() == type );
Mat dataRow(size.height, size.width, type, _data.ptr(i));
(*each).copyTo(dataRow);
}
@ -2595,7 +2595,7 @@ double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar )
AutoBuffer<double> buf(len);
double result = 0;
CV_Assert( type == v2.type(), type == icovar.type(),
CV_Assert_N( type == v2.type(), type == icovar.type(),
sz == v2.size(), len == icovar.rows && len == icovar.cols );
sz.width *= v1.channels();
@ -2888,7 +2888,7 @@ void cv::mulTransposed( InputArray _src, OutputArray _dst, bool ata,
if( !delta.empty() )
{
CV_Assert( delta.channels() == 1,
CV_Assert_N( delta.channels() == 1,
(delta.rows == src.rows || delta.rows == 1),
(delta.cols == src.cols || delta.cols == 1));
if( delta.type() != dtype )
@ -3291,7 +3291,7 @@ double Mat::dot(InputArray _mat) const
Mat mat = _mat.getMat();
int cn = channels();
DotProdFunc func = getDotProdFunc(depth());
CV_Assert( mat.type() == type(), mat.size == size, func != 0 );
CV_Assert_N( mat.type() == type(), mat.size == size, func != 0 );
if( isContinuous() && mat.isContinuous() )
{
@ -3327,7 +3327,7 @@ CV_IMPL void cvGEMM( const CvArr* Aarr, const CvArr* Barr, double alpha,
if( Carr )
C = cv::cvarrToMat(Carr);
CV_Assert( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)),
CV_Assert_N( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)),
(D.cols == ((flags & CV_GEMM_B_T) == 0 ? B.cols : B.rows)),
D.type() == A.type() );
@ -3350,7 +3350,7 @@ cvTransform( const CvArr* srcarr, CvArr* dstarr,
m = _m;
}
CV_Assert( dst.depth() == src.depth(), dst.channels() == m.rows );
CV_Assert_N( dst.depth() == src.depth(), dst.channels() == m.rows );
cv::transform( src, dst, m );
}
@ -3360,7 +3360,7 @@ cvPerspectiveTransform( const CvArr* srcarr, CvArr* dstarr, const CvMat* mat )
{
cv::Mat m = cv::cvarrToMat(mat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
CV_Assert( dst.type() == src.type(), dst.channels() == m.rows-1 );
CV_Assert_N( dst.type() == src.type(), dst.channels() == m.rows-1 );
cv::perspectiveTransform( src, dst, m );
}
@ -3370,7 +3370,7 @@ CV_IMPL void cvScaleAdd( const CvArr* srcarr1, CvScalar scale,
{
cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
CV_Assert( src1.size == dst.size, src1.type() == dst.type() );
CV_Assert_N( src1.size == dst.size, src1.type() == dst.type() );
cv::scaleAdd( src1, scale.val[0], cv::cvarrToMat(srcarr2), dst );
}
@ -3380,7 +3380,7 @@ cvCalcCovarMatrix( const CvArr** vecarr, int count,
CvArr* covarr, CvArr* avgarr, int flags )
{
cv::Mat cov0 = cv::cvarrToMat(covarr), cov = cov0, mean0, mean;
CV_Assert( vecarr != 0, count >= 1 );
CV_Assert_N( vecarr != 0, count >= 1 );
if( avgarr )
mean = mean0 = cv::cvarrToMat(avgarr);
@ -3460,7 +3460,7 @@ cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigen
int ecount0 = evals0.cols + evals0.rows - 1;
int ecount = evals.cols + evals.rows - 1;
CV_Assert( (evals0.cols == 1 || evals0.rows == 1),
CV_Assert_N( (evals0.cols == 1 || evals0.rows == 1),
ecount0 <= ecount,
evects0.cols == evects.cols,
evects0.rows == ecount0 );
@ -3491,12 +3491,12 @@ cvProjectPCA( const CvArr* data_arr, const CvArr* avg_arr,
int n;
if( mean.rows == 1 )
{
CV_Assert(dst.cols <= evects.rows, dst.rows == data.rows);
CV_Assert_N(dst.cols <= evects.rows, dst.rows == data.rows);
n = dst.cols;
}
else
{
CV_Assert(dst.rows <= evects.rows, dst.cols == data.cols);
CV_Assert_N(dst.rows <= evects.rows, dst.cols == data.cols);
n = dst.rows;
}
pca.eigenvectors = evects.rowRange(0, n);
@ -3522,12 +3522,12 @@ cvBackProjectPCA( const CvArr* proj_arr, const CvArr* avg_arr,
int n;
if( mean.rows == 1 )
{
CV_Assert(data.cols <= evects.rows, dst.rows == data.rows);
CV_Assert_N(data.cols <= evects.rows, dst.rows == data.rows);
n = data.cols;
}
else
{
CV_Assert(data.rows <= evects.rows, dst.cols == data.cols);
CV_Assert_N(data.rows <= evects.rows, dst.cols == data.cols);
n = data.rows;
}
pca.eigenvectors = evects.rowRange(0, n);

View File

@ -1123,9 +1123,37 @@ template<typename R> struct TheTest
return *this;
}
#if CV_FP16
TheTest & test_loadstore_fp16_f32()
{
printf("test_loadstore_fp16_f32 ...\n");
AlignedData<v_uint16> data; data.a.clear();
data.a.d[0] = 0x3c00; // 1.0
data.a.d[R::nlanes - 1] = (unsigned short)0xc000; // -2.0
AlignedData<v_float32> data_f32; data_f32.a.clear();
AlignedData<v_uint16> out;
R r1 = vx_load_fp16_f32((short*)data.a.d);
R r2(r1);
EXPECT_EQ(1.0f, r1.get0());
vx_store(data_f32.a.d, r2);
EXPECT_EQ(-2.0f, data_f32.a.d[R::nlanes - 1]);
out.a.clear();
vx_store_fp16((short*)out.a.d, r2);
for (int i = 0; i < R::nlanes; ++i)
{
EXPECT_EQ(data.a[i], out.a[i]) << "i=" << i;
}
return *this;
}
#endif
#if CV_SIMD_FP16
TheTest & test_loadstore_fp16()
{
#if CV_FP16 && CV_SIMD
printf("test_loadstore_fp16 ...\n");
AlignedData<R> data;
AlignedData<R> out;
@ -1149,12 +1177,10 @@ template<typename R> struct TheTest
EXPECT_EQ(data.a, out.a);
return *this;
#endif
}
TheTest & test_float_cvt_fp16()
{
#if CV_FP16 && CV_SIMD
printf("test_float_cvt_fp16 ...\n");
AlignedData<v_float32> data;
// check conversion
@ -1165,9 +1191,8 @@ template<typename R> struct TheTest
EXPECT_EQ(r3.get0(), r1.get0());
return *this;
#endif
}
#endif
};
@ -1448,11 +1473,14 @@ void test_hal_intrin_float64()
void test_hal_intrin_float16()
{
DUMP_ENTRY(v_float16);
#if CV_SIMD_WIDTH > 16
#if CV_FP16
TheTest<v_float32>().test_loadstore_fp16_f32();
#endif
#if CV_SIMD_FP16
TheTest<v_float16>()
.test_loadstore_fp16()
.test_float_cvt_fp16()
;
;
#endif
}
#endif
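
The magic constants in test_loadstore_fp16_f32 decode as IEEE-754 binary16 (1 sign bit, 5 exponent bits with bias 15, 10 fraction bits):

0x3c00 = 0 01111 0000000000 -> (+1) * 2^(15-15) * 1.0 =  1.0
0xc000 = 1 10000 0000000000 -> (-1) * 2^(16-15) * 1.0 = -2.0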

View File

@ -209,7 +209,7 @@ inline Range clamp(const Range& r, int axisSize)
{
Range clamped(std::max(r.start, 0),
r.end > 0 ? std::min(r.end, axisSize) : axisSize + r.end + 1);
CV_Assert(clamped.start < clamped.end, clamped.end <= axisSize);
CV_Assert_N(clamped.start < clamped.end, clamped.end <= axisSize);
return clamped;
}
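
For example, with axisSize = 5 a Python-style negative end wraps around: clamp(Range(0, -1), 5) yields Range(0, 5), since 5 + (-1) + 1 = 5.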

View File

@ -359,7 +359,7 @@ public:
{
if (!layerParams.get<bool>("use_global_stats", true))
{
CV_Assert(layer.bottom_size() == 1, layer.top_size() == 1);
CV_Assert_N(layer.bottom_size() == 1, layer.top_size() == 1);
LayerParams mvnParams;
mvnParams.set("eps", layerParams.get<float>("eps", 1e-5));

View File

@ -134,7 +134,7 @@ void blobFromImages(InputArrayOfArrays images_, OutputArray blob_, double scalef
if (ddepth == CV_8U)
{
CV_CheckEQ(scalefactor, 1.0, "Scaling is not supported for CV_8U blob depth");
CV_Assert(mean_ == Scalar(), "Mean subtraction is not supported for CV_8U blob depth");
CV_Assert(mean_ == Scalar() && "Mean subtraction is not supported for CV_8U blob depth");
}
std::vector<Mat> images;
@ -451,8 +451,8 @@ struct DataLayer : public Layer
{
double scale = scaleFactors[i];
Scalar& mean = means[i];
CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4,
outputs[i].type() == CV_32F);
CV_Assert(mean == Scalar() || inputsData[i].size[1] <= 4);
CV_CheckTypeEQ(outputs[i].type(), CV_32FC1, "");
bool singleMean = true;
for (int j = 1; j < std::min(4, inputsData[i].size[1]) && singleMean; ++j)
@ -569,7 +569,7 @@ struct DataLayer : public Layer
void finalize(const std::vector<Mat*>&, std::vector<Mat>& outputs) CV_OVERRIDE
{
CV_Assert(outputs.size() == scaleFactors.size(), outputs.size() == means.size(),
CV_Assert_N(outputs.size() == scaleFactors.size(), outputs.size() == means.size(),
inputsData.size() == outputs.size());
skip = true;
for (int i = 0; skip && i < inputsData.size(); ++i)
@ -588,7 +588,8 @@ struct DataLayer : public Layer
lp.precision = InferenceEngine::Precision::FP32;
std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));
CV_Assert(inputsData.size() == 1, inputsData[0].dims == 4);
CV_CheckEQ(inputsData.size(), (size_t)1, "");
CV_CheckEQ(inputsData[0].dims, 4, "");
const size_t numChannels = inputsData[0].size[1];
CV_Assert(numChannels <= 4);
@ -1237,7 +1238,7 @@ struct Net::Impl
void initHalideBackend()
{
CV_TRACE_FUNCTION();
CV_Assert(preferableBackend == DNN_BACKEND_HALIDE, haveHalide());
CV_Assert_N(preferableBackend == DNN_BACKEND_HALIDE, haveHalide());
// Iterator to current layer.
MapIdToLayerData::iterator it = layers.begin();
@ -1302,7 +1303,7 @@ struct Net::Impl
if (!node.empty())
{
Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieNode.empty(), !ieNode->net.empty());
CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
layerNet = ieNode->net;
}
}
@ -1316,7 +1317,7 @@ struct Net::Impl
if (!inpNode.empty())
{
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
if (layerNet != ieInpNode->net)
{
// layerNet is empty or nodes are from different graphs.
@ -1330,7 +1331,7 @@ struct Net::Impl
void initInfEngineBackend()
{
CV_TRACE_FUNCTION();
CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine());
CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine());
#ifdef HAVE_INF_ENGINE
MapIdToLayerData::iterator it;
Ptr<InfEngineBackendNet> net;
@ -1425,7 +1426,7 @@ struct Net::Impl
if (!inpNode.empty())
{
Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
CV_Assert(!ieInpNode.empty(), !ieInpNode->net.empty());
CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
if (ieInpNode->net != net)
{
net = Ptr<InfEngineBackendNet>();
@ -1827,7 +1828,7 @@ struct Net::Impl
// To prevent memory collisions (i.e. when input of
// [conv] and output of [eltwise] is the same blob)
// we allocate a new blob.
CV_Assert(ld.outputBlobs.size() == 1, ld.outputBlobsWrappers.size() == 1);
CV_Assert_N(ld.outputBlobs.size() == 1, ld.outputBlobsWrappers.size() == 1);
ld.outputBlobs[0] = ld.outputBlobs[0].clone();
ld.outputBlobsWrappers[0] = wrap(ld.outputBlobs[0]);
@ -1984,7 +1985,7 @@ struct Net::Impl
}
// Layers that refer old input Mat will refer to the
// new data but the same Mat object.
CV_Assert(curr_output.data == output_slice.data, oldPtr == &curr_output);
CV_Assert_N(curr_output.data == output_slice.data, oldPtr == &curr_output);
}
ld.skip = true;
printf_(("\toptimized out Concat layer %s\n", concatLayer->name.c_str()));
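
Several multi-argument asserts in this file migrate to the CV_Check* family, which reports the offending values rather than just the expression text. A hedged usage sketch (validateBlob is ours, not part of the patch):

#include <opencv2/core.hpp>
#include <opencv2/core/check.hpp>

static void validateBlob(const cv::Mat& blob)
{
    // On failure these print both sides of the comparison,
    // e.g. the actual blob.dims value versus the expected 4,
    // followed by the custom message.
    CV_CheckEQ(blob.dims, 4, "DNN blobs are expected to be 4-dimensional");
    CV_CheckTypeEQ(blob.type(), CV_32FC1, "only single-channel float blobs are handled");
}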

View File

@ -48,7 +48,7 @@ public:
float varMeanScale = 1.f;
if (!hasWeights && !hasBias && blobs.size() > 2 && useGlobalStats) {
CV_Assert(blobs.size() == 3, blobs[2].type() == CV_32F);
CV_Assert(blobs.size() == 3); CV_CheckTypeEQ(blobs[2].type(), CV_32FC1, "");
varMeanScale = blobs[2].at<float>(0);
if (varMeanScale != 0)
varMeanScale = 1/varMeanScale;

View File

@ -349,8 +349,8 @@ public:
// (conv(I) + b1) * w + b2
// means we can scale the convolution's weights by w and replace its bias with [b1 * w + b2]
const int outCn = weightsMat.size[0];
CV_Assert(!weightsMat.empty(), biasvec.size() == outCn + 2,
w.empty() || outCn == w.total(), b.empty() || outCn == b.total());
CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2,
w.empty() || outCn == w.total(), b.empty() || outCn == b.total());
if (!w.empty())
{
@ -512,13 +512,14 @@ public:
Size kernel, Size pad, Size stride, Size dilation,
const ActivationLayer* activ, int ngroups, int nstripes )
{
CV_Assert( input.dims == 4 && output.dims == 4,
CV_Assert_N(
input.dims == 4 && output.dims == 4,
input.size[0] == output.size[0],
weights.rows == output.size[1],
weights.cols == (input.size[1]/ngroups)*kernel.width*kernel.height,
input.type() == output.type(),
input.type() == weights.type(),
input.type() == CV_32F,
input.type() == CV_32FC1,
input.isContinuous(),
output.isContinuous(),
biasvec.size() == (size_t)output.size[1]+2);
@ -1009,8 +1010,8 @@ public:
name.c_str(), inputs[0]->size[0], inputs[0]->size[1], inputs[0]->size[2], inputs[0]->size[3],
kernel.width, kernel.height, pad.width, pad.height,
stride.width, stride.height, dilation.width, dilation.height);*/
CV_Assert(inputs.size() == (size_t)1, inputs[0]->size[1] % blobs[0].size[1] == 0,
outputs.size() == 1, inputs[0]->data != outputs[0].data);
CV_Assert_N(inputs.size() == (size_t)1, inputs[0]->size[1] % blobs[0].size[1] == 0,
outputs.size() == 1, inputs[0]->data != outputs[0].data);
int ngroups = inputs[0]->size[1]/blobs[0].size[1];
CV_Assert(outputs[0].size[1] % ngroups == 0);
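
The fusion comment at the top of this hunk is just linearity of convolution in its weights, written per output channel:

(conv_W(I) + b1) * w + b2 = conv_{w*W}(I) + (b1*w + b2)

so the fused layer keeps the same structure with kernel w*W and bias b1*w + b2.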

View File

@ -14,7 +14,7 @@ class CropAndResizeLayerImpl CV_FINAL : public CropAndResizeLayer
public:
CropAndResizeLayerImpl(const LayerParams& params)
{
CV_Assert(params.has("width"), params.has("height"));
CV_Assert_N(params.has("width"), params.has("height"));
outWidth = params.get<float>("width");
outHeight = params.get<float>("height");
}
@ -24,7 +24,7 @@ public:
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2, inputs[0].size() == 4);
CV_Assert_N(inputs.size() == 2, inputs[0].size() == 4);
if (inputs[0][0] != 1)
CV_Error(Error::StsNotImplemented, "");
outputs.resize(1, MatShape(4));
@ -56,7 +56,7 @@ public:
const int inpWidth = inp.size[3];
const int inpSpatialSize = inpHeight * inpWidth;
const int outSpatialSize = outHeight * outWidth;
CV_Assert(inp.isContinuous(), out.isContinuous());
CV_Assert_N(inp.isContinuous(), out.isContinuous());
for (int b = 0; b < boxes.rows; ++b)
{

View File

@ -139,7 +139,7 @@ public:
const std::vector<float>& coeffs, EltwiseOp op,
const ActivationLayer* activ, int nstripes)
{
CV_Assert(1 < dst.dims && dst.dims <= 4, dst.type() == CV_32F, dst.isContinuous());
CV_Check(dst.dims, 1 < dst.dims && dst.dims <= 4, ""); CV_CheckTypeEQ(dst.type(), CV_32FC1, ""); CV_Assert(dst.isContinuous());
CV_Assert(coeffs.empty() || coeffs.size() == (size_t)nsrcs);
for( int i = 0; i < nsrcs; i++ )

View File

@ -276,7 +276,7 @@ public:
{
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
InferenceEngine::Layout::C,
{numChannels});
{(size_t)numChannels});
weights->allocate();
std::vector<float> ones(numChannels, 1);
weights->set(ones);
@ -286,7 +286,7 @@ public:
else
{
CV_Assert(numChannels == blobs[0].total());
ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
}
ieLayer->params["eps"] = format("%f", epsilon);

View File

@ -38,7 +38,7 @@ public:
{
paddings[i].first = paddingsParam.get<int>(i * 2); // Pad before.
paddings[i].second = paddingsParam.get<int>(i * 2 + 1); // Pad after.
CV_Assert(paddings[i].first >= 0, paddings[i].second >= 0);
CV_Assert_N(paddings[i].first >= 0, paddings[i].second >= 0);
}
}
@ -127,8 +127,8 @@ public:
const int padBottom = outHeight - dstRanges[2].end;
const int padLeft = dstRanges[3].start;
const int padRight = outWidth - dstRanges[3].end;
CV_Assert(padTop < inpHeight, padBottom < inpHeight,
padLeft < inpWidth, padRight < inpWidth);
CV_CheckLT(padTop, inpHeight, ""); CV_CheckLT(padBottom, inpHeight, "");
CV_CheckLT(padLeft, inpWidth, ""); CV_CheckLT(padRight, inpWidth, "");
for (size_t n = 0; n < inputs[0]->size[0]; ++n)
{

View File

@ -216,15 +216,15 @@ public:
switch (type)
{
case MAX:
CV_Assert(inputs.size() == 1, outputs.size() == 2);
CV_Assert_N(inputs.size() == 1, outputs.size() == 2);
maxPooling(*inputs[0], outputs[0], outputs[1]);
break;
case AVE:
CV_Assert(inputs.size() == 1, outputs.size() == 1);
CV_Assert_N(inputs.size() == 1, outputs.size() == 1);
avePooling(*inputs[0], outputs[0]);
break;
case ROI: case PSROI:
CV_Assert(inputs.size() == 2, outputs.size() == 1);
CV_Assert_N(inputs.size() == 2, outputs.size() == 1);
roiPooling(*inputs[0], *inputs[1], outputs[0]);
break;
default:
@ -311,7 +311,8 @@ public:
Size stride, Size pad, bool avePoolPaddedArea, int poolingType, float spatialScale,
bool computeMaxIdx, int nstripes)
{
CV_Assert(src.isContinuous(), dst.isContinuous(),
CV_Assert_N(
src.isContinuous(), dst.isContinuous(),
src.type() == CV_32F, src.type() == dst.type(),
src.dims == 4, dst.dims == 4,
((poolingType == ROI || poolingType == PSROI) && dst.size[0] == rois.size[0] || src.size[0] == dst.size[0]),

View File

@ -254,7 +254,7 @@ public:
}
if (params.has("offset_h") || params.has("offset_w"))
{
CV_Assert(!params.has("offset"), params.has("offset_h"), params.has("offset_w"));
CV_Assert_N(!params.has("offset"), params.has("offset_h"), params.has("offset_w"));
getParams("offset_h", params, &_offsetsY);
getParams("offset_w", params, &_offsetsX);
CV_Assert(_offsetsX.size() == _offsetsY.size());
@ -299,7 +299,8 @@ public:
void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
CV_Assert(inputs.size() > 1, inputs[0]->dims == 4, inputs[1]->dims == 4);
CV_CheckGT(inputs.size(), (size_t)1, "");
CV_CheckEQ(inputs[0]->dims, 4, ""); CV_CheckEQ(inputs[1]->dims, 4, "");
int layerWidth = inputs[0]->size[3];
int layerHeight = inputs[0]->size[2];
@ -486,8 +487,8 @@ public:
if (_explicitSizes)
{
CV_Assert(!_boxWidths.empty(), !_boxHeights.empty(),
_boxWidths.size() == _boxHeights.size());
CV_Assert(!_boxWidths.empty()); CV_Assert(!_boxHeights.empty());
CV_Assert(_boxWidths.size() == _boxHeights.size());
ieLayer->params["width"] = format("%f", _boxWidths[0]);
ieLayer->params["height"] = format("%f", _boxHeights[0]);
for (int i = 1; i < _boxWidths.size(); ++i)
@ -529,7 +530,7 @@ public:
ieLayer->params["step_h"] = format("%f", _stepY);
ieLayer->params["step_w"] = format("%f", _stepX);
}
CV_Assert(_offsetsX.size() == 1, _offsetsY.size() == 1, _offsetsX[0] == _offsetsY[0]);
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
ieLayer->params["offset"] = format("%f", _offsetsX[0]);
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));

View File

@ -197,7 +197,7 @@ public:
}
else
{
CV_Assert(inputs.size() == 2, total(inputs[0]) == total(inputs[1]));
CV_Assert_N(inputs.size() == 2, total(inputs[0]) == total(inputs[1]));
outputs.assign(1, inputs[1]);
}
return true;

View File

@ -43,7 +43,7 @@ public:
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1, inputs[0].size() == 4);
CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4);
outputs.resize(1, inputs[0]);
outputs[0][2] = outHeight > 0 ? outHeight : (outputs[0][2] * zoomFactorHeight);
outputs[0][3] = outWidth > 0 ? outWidth : (outputs[0][3] * zoomFactorWidth);
@ -106,7 +106,7 @@ public:
const int inpSpatialSize = inpHeight * inpWidth;
const int outSpatialSize = outHeight * outWidth;
const int numPlanes = inp.size[0] * inp.size[1];
CV_Assert(inp.isContinuous(), out.isContinuous());
CV_Assert_N(inp.isContinuous(), out.isContinuous());
Mat inpPlanes = inp.reshape(1, numPlanes * inpHeight);
Mat outPlanes = out.reshape(1, numPlanes * outHeight);
@ -184,7 +184,7 @@ public:
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1, inputs[0].size() == 4);
CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4);
outputs.resize(1, inputs[0]);
outputs[0][2] = outHeight > 0 ? outHeight : (1 + zoomFactorHeight * (outputs[0][2] - 1));
outputs[0][3] = outWidth > 0 ? outWidth : (1 + zoomFactorWidth * (outputs[0][3] - 1));

View File

@ -64,7 +64,7 @@ public:
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_Assert(outputs.size() == 1, !blobs.empty() || inputs.size() == 2);
CV_Assert_N(outputs.size() == 1, !blobs.empty() || inputs.size() == 2);
Mat &inpBlob = *inputs[0];
Mat &outBlob = outputs[0];
@ -76,7 +76,9 @@ public:
weights = weights.reshape(1, 1);
MatShape inpShape = shape(inpBlob);
const int numWeights = !weights.empty() ? weights.total() : bias.total();
CV_Assert(numWeights != 0, !hasWeights || !hasBias || weights.total() == bias.total());
CV_Assert(numWeights != 0);
if (hasWeights && hasBias)
CV_CheckEQ(weights.total(), bias.total(), "Incompatible weights/bias blobs");
int endAxis;
for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
@ -84,9 +86,9 @@ public:
if (total(inpShape, axis, endAxis) == numWeights)
break;
}
CV_Assert(total(inpShape, axis, endAxis) == numWeights,
!hasBias || numWeights == bias.total(),
inpBlob.type() == CV_32F && outBlob.type() == CV_32F);
CV_Assert(total(inpShape, axis, endAxis) == numWeights);
CV_Assert(!hasBias || numWeights == bias.total());
CV_CheckTypeEQ(inpBlob.type(), CV_32FC1, ""); CV_CheckTypeEQ(outBlob.type(), CV_32FC1, "");
int numSlices = total(inpShape, 0, axis);
float* inpData = (float*)inpBlob.data;

View File

@ -25,7 +25,7 @@ void NMSBoxes(const std::vector<Rect>& bboxes, const std::vector<float>& scores,
const float score_threshold, const float nms_threshold,
std::vector<int>& indices, const float eta, const int top_k)
{
CV_Assert(bboxes.size() == scores.size(), score_threshold >= 0,
CV_Assert_N(bboxes.size() == scores.size(), score_threshold >= 0,
nms_threshold >= 0, eta > 0);
NMSFast_(bboxes, scores, score_threshold, nms_threshold, eta, top_k, indices, rectOverlap);
}
@ -46,7 +46,7 @@ void NMSBoxes(const std::vector<RotatedRect>& bboxes, const std::vector<float>&
const float score_threshold, const float nms_threshold,
std::vector<int>& indices, const float eta, const int top_k)
{
CV_Assert(bboxes.size() == scores.size(), score_threshold >= 0,
CV_Assert_N(bboxes.size() == scores.size(), score_threshold >= 0,
nms_threshold >= 0, eta > 0);
NMSFast_(bboxes, scores, score_threshold, nms_threshold, eta, top_k, indices, rotatedRectIOU);
}

View File

@ -221,7 +221,7 @@ public:
std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
{
Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
CV_CheckEQ(epsMat.total(), (size_t)1, ""); CV_CheckTypeEQ(epsMat.type(), CV_32FC1, "");
fusedNode->mutable_input()->RemoveLast();
fusedNode->clear_attr();
@ -256,7 +256,7 @@ public:
std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
{
Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
CV_CheckEQ(epsMat.total(), (size_t)1, ""); CV_CheckTypeEQ(epsMat.type(), CV_32FC1, "");
fusedNode->mutable_input()->RemoveLast();
fusedNode->clear_attr();
@ -593,7 +593,7 @@ public:
std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
{
Mat factorsMat = getTensorContent(inputNodes[1]->attr().at("value").tensor());
CV_Assert(factorsMat.total() == 2, factorsMat.type() == CV_32SC1);
CV_CheckEQ(factorsMat.total(), (size_t)2, ""); CV_CheckTypeEQ(factorsMat.type(), CV_32SC1, "");
// Height scale factor
tensorflow::TensorProto* factorY = inputNodes[1]->mutable_attr()->at("value").mutable_tensor();

View File

@ -545,8 +545,8 @@ const tensorflow::TensorProto& TFImporter::getConstBlob(const tensorflow::NodeDe
}
else
{
CV_Assert(nodeIdx < netTxt.node_size(),
netTxt.node(nodeIdx).name() == kernel_inp.name);
CV_Assert_N(nodeIdx < netTxt.node_size(),
netTxt.node(nodeIdx).name() == kernel_inp.name);
return netTxt.node(nodeIdx).attr().at("value").tensor();
}
}
@ -587,8 +587,8 @@ static void addConstNodes(tensorflow::GraphDef& net, std::map<String, int>& cons
Mat qMin = getTensorContent(net.node(minId).attr().at("value").tensor());
Mat qMax = getTensorContent(net.node(maxId).attr().at("value").tensor());
CV_Assert(qMin.total() == 1, qMin.type() == CV_32FC1,
qMax.total() == 1, qMax.type() == CV_32FC1);
CV_Assert_N(qMin.total() == 1, qMin.type() == CV_32FC1,
qMax.total() == 1, qMax.type() == CV_32FC1);
Mat content = getTensorContent(*tensor);
@ -1295,8 +1295,9 @@ void TFImporter::populateNet(Net dstNet)
CV_Assert(layer.input_size() == 3);
Mat begins = getTensorContent(getConstBlob(layer, value_id, 1));
Mat sizes = getTensorContent(getConstBlob(layer, value_id, 2));
CV_Assert(!begins.empty(), !sizes.empty(), begins.type() == CV_32SC1,
sizes.type() == CV_32SC1);
CV_Assert_N(!begins.empty(), !sizes.empty());
CV_CheckTypeEQ(begins.type(), CV_32SC1, "");
CV_CheckTypeEQ(sizes.type(), CV_32SC1, "");
if (begins.total() == 4 && getDataLayout(name, data_layouts) == DATA_LAYOUT_NHWC)
{
@ -1665,7 +1666,7 @@ void TFImporter::populateNet(Net dstNet)
if (layer.input_size() == 2)
{
Mat outSize = getTensorContent(getConstBlob(layer, value_id, 1));
CV_Assert(outSize.type() == CV_32SC1, outSize.total() == 2);
CV_CheckTypeEQ(outSize.type(), CV_32SC1, ""); CV_CheckEQ(outSize.total(), (size_t)2, "");
layerParams.set("height", outSize.at<int>(0, 0));
layerParams.set("width", outSize.at<int>(0, 1));
}
@ -1673,8 +1674,8 @@ void TFImporter::populateNet(Net dstNet)
{
Mat factorHeight = getTensorContent(getConstBlob(layer, value_id, 1));
Mat factorWidth = getTensorContent(getConstBlob(layer, value_id, 2));
CV_Assert(factorHeight.type() == CV_32SC1, factorHeight.total() == 1,
factorWidth.type() == CV_32SC1, factorWidth.total() == 1);
CV_CheckTypeEQ(factorHeight.type(), CV_32SC1, ""); CV_CheckEQ(factorHeight.total(), (size_t)1, "");
CV_CheckTypeEQ(factorWidth.type(), CV_32SC1, ""); CV_CheckEQ(factorWidth.total(), (size_t)1, "");
layerParams.set("zoom_factor_x", factorWidth.at<int>(0));
layerParams.set("zoom_factor_y", factorHeight.at<int>(0));
}
@ -1772,7 +1773,7 @@ void TFImporter::populateNet(Net dstNet)
CV_Assert(layer.input_size() == 3);
Mat cropSize = getTensorContent(getConstBlob(layer, value_id, 2));
CV_Assert(cropSize.type() == CV_32SC1, cropSize.total() == 2);
CV_CheckTypeEQ(cropSize.type(), CV_32SC1, ""); CV_CheckEQ(cropSize.total(), (size_t)2, "");
layerParams.set("height", cropSize.at<int>(0));
layerParams.set("width", cropSize.at<int>(1));
@ -1826,8 +1827,8 @@ void TFImporter::populateNet(Net dstNet)
Mat minValue = getTensorContent(getConstBlob(layer, value_id, 1));
Mat maxValue = getTensorContent(getConstBlob(layer, value_id, 2));
CV_Assert(minValue.total() == 1, minValue.type() == CV_32F,
maxValue.total() == 1, maxValue.type() == CV_32F);
CV_CheckEQ(minValue.total(), (size_t)1, ""); CV_CheckTypeEQ(minValue.type(), CV_32FC1, "");
CV_CheckEQ(maxValue.total(), (size_t)1, ""); CV_CheckTypeEQ(maxValue.type(), CV_32FC1, "");
layerParams.set("min_value", minValue.at<float>(0));
layerParams.set("max_value", maxValue.at<float>(0));

View File

@ -896,8 +896,8 @@ struct TorchImporter
else if (nnName == "SpatialZeroPadding" || nnName == "SpatialReflectionPadding")
{
readTorchTable(scalarParams, tensorParams);
CV_Assert(scalarParams.has("pad_l"), scalarParams.has("pad_r"),
scalarParams.has("pad_t"), scalarParams.has("pad_b"));
CV_Assert_N(scalarParams.has("pad_l"), scalarParams.has("pad_r"),
scalarParams.has("pad_t"), scalarParams.has("pad_b"));
int padTop = scalarParams.get<int>("pad_t");
int padLeft = scalarParams.get<int>("pad_l");
int padRight = scalarParams.get<int>("pad_r");

View File

@ -113,7 +113,11 @@ TEST_P(Convolution, Accuracy)
bool skipCheck = false;
if (cvtest::skipUnstableTests && backendId == DNN_BACKEND_OPENCV &&
(targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16) &&
kernel == Size(3, 1) && stride == Size(1, 1) && pad == Size(0, 1))
(
(kernel == Size(3, 1) && stride == Size(1, 1) && pad == Size(0, 1)) ||
(stride.area() > 1 && !(pad.width == 0 && pad.height == 0))
)
)
skipCheck = true;
int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width};

View File

@ -814,7 +814,7 @@ TEST_P(Layer_Test_DWconv_Prelu, Accuracy)
const int group = 3; //outChannels=group when group>1
const int num_output = get<1>(GetParam());
const int kernel_depth = num_input/group;
CV_Assert(num_output >= group, num_output % group == 0, num_input % group == 0);
CV_Assert_N(num_output >= group, num_output % group == 0, num_input % group == 0);
Net net;
//layer 1: dwconv

View File

@ -1500,7 +1500,7 @@ MainWindowProc( HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam )
rgn = CreateRectRgn(0, 0, wrc.right, wrc.bottom);
rgn1 = CreateRectRgn(cr.left, cr.top, cr.right, cr.bottom);
rgn2 = CreateRectRgn(tr.left, tr.top, tr.right, tr.bottom);
CV_Assert(rgn != 0, rgn1 != 0, rgn2 != 0);
CV_Assert_N(rgn != 0, rgn1 != 0, rgn2 != 0);
ret = CombineRgn(rgn, rgn, rgn1, RGN_DIFF);
ret = CombineRgn(rgn, rgn, rgn2, RGN_DIFF);

View File

@ -50,8 +50,8 @@
//! @addtogroup imgcodecs_ios
//! @{
UIImage* MatToUIImage(const cv::Mat& image);
void UIImageToMat(const UIImage* image,
cv::Mat& m, bool alphaExist = false);
CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image);
CV_EXPORTS void UIImageToMat(const UIImage* image,
cv::Mat& m, bool alphaExist = false);
//! @}

View File

@ -47,8 +47,8 @@
#include "opencv2/core.hpp"
#include "precomp.hpp"
UIImage* MatToUIImage(const cv::Mat& image);
void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist);
CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image);
CV_EXPORTS void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist);
UIImage* MatToUIImage(const cv::Mat& image) {

View File

@ -859,45 +859,39 @@ static int read_frame_v4l2(CvCaptureCAM_V4L* capture) {
}
static int mainloop_v4l2(CvCaptureCAM_V4L* capture) {
unsigned int count;
for (;;) {
fd_set fds;
struct timeval tv;
int r;
count = 1;
FD_ZERO (&fds);
FD_SET (capture->deviceHandle, &fds);
while (count-- > 0) {
for (;;) {
fd_set fds;
struct timeval tv;
int r;
/* Timeout. */
tv.tv_sec = 10;
tv.tv_usec = 0;
FD_ZERO (&fds);
FD_SET (capture->deviceHandle, &fds);
r = select (capture->deviceHandle+1, &fds, NULL, NULL, &tv);
/* Timeout. */
tv.tv_sec = 10;
tv.tv_usec = 0;
if (-1 == r) {
if (EINTR == errno)
continue;
r = select (capture->deviceHandle+1, &fds, NULL, NULL, &tv);
if (-1 == r) {
if (EINTR == errno)
continue;
perror ("select");
}
if (0 == r) {
fprintf (stderr, "select timeout\n");
/* end the infinite loop */
break;
}
int returnCode = read_frame_v4l2 (capture);
if(returnCode == -1)
return -1;
if(returnCode == 1)
return 1;
perror ("select");
}
if (0 == r) {
fprintf (stderr, "select timeout\n");
/* end the infinite loop */
break;
}
int returnCode = read_frame_v4l2 (capture);
if(returnCode == -1)
return -1;
if(returnCode == 1)
return 1;
}
return 0;
}

View File

@ -49,7 +49,6 @@ int main(int argc, char** argv)
float scale = parser.get<float>("scale");
Scalar mean = parser.get<Scalar>("mean");
bool swapRB = parser.get<bool>("rgb");
CV_Assert(parser.has("width"), parser.has("height"));
int inpWidth = parser.get<int>("width");
int inpHeight = parser.get<int>("height");
String model = parser.get<String>("model");
@ -72,7 +71,13 @@ int main(int argc, char** argv)
}
}
CV_Assert(parser.has("model"));
if (!parser.check())
{
parser.printErrors();
return 1;
}
CV_Assert(!model.empty());
//! [Read and initialize network]
Net net = readNet(model, config, framework);
net.setPreferableBackend(backendId);

View File

@ -108,7 +108,7 @@ public:
}
else
{
CV_Assert(blobs.size() == 2, blobs[0].total() == 1, blobs[1].total() == 1);
CV_Assert(blobs.size() == 2); CV_Assert(blobs[0].total() == 1); CV_Assert(blobs[1].total() == 1);
factorHeight = blobs[0].at<int>(0, 0);
factorWidth = blobs[1].at<int>(0, 0);
outHeight = outWidth = 0;

View File

@ -57,7 +57,6 @@ int main(int argc, char** argv)
float scale = parser.get<float>("scale");
Scalar mean = parser.get<Scalar>("mean");
bool swapRB = parser.get<bool>("rgb");
CV_Assert(parser.has("width"), parser.has("height"));
int inpWidth = parser.get<int>("width");
int inpHeight = parser.get<int>("height");
String model = parser.get<String>("model");
@ -99,7 +98,13 @@ int main(int argc, char** argv)
}
}
CV_Assert(parser.has("model"));
if (!parser.check())
{
parser.printErrors();
return 1;
}
CV_Assert(!model.empty());
//! [Read and initialize network]
Net net = readNet(model, config, framework);
net.setPreferableBackend(backendId);

View File

@ -33,9 +33,16 @@ int main(int argc, char** argv)
float nmsThreshold = parser.get<float>("nms");
int inpWidth = parser.get<int>("width");
int inpHeight = parser.get<int>("height");
CV_Assert(parser.has("model"));
String model = parser.get<String>("model");
if (!parser.check())
{
parser.printErrors();
return 1;
}
CV_Assert(!model.empty());
// Load network.
Net net = readNet(model);
@ -113,9 +120,9 @@ void decode(const Mat& scores, const Mat& geometry, float scoreThresh,
std::vector<RotatedRect>& detections, std::vector<float>& confidences)
{
detections.clear();
CV_Assert(scores.dims == 4, geometry.dims == 4, scores.size[0] == 1,
geometry.size[0] == 1, scores.size[1] == 1, geometry.size[1] == 5,
scores.size[2] == geometry.size[2], scores.size[3] == geometry.size[3]);
CV_Assert(scores.dims == 4); CV_Assert(geometry.dims == 4); CV_Assert(scores.size[0] == 1);
CV_Assert(geometry.size[0] == 1); CV_Assert(scores.size[1] == 1); CV_Assert(geometry.size[1] == 5);
CV_Assert(scores.size[2] == geometry.size[2]); CV_Assert(scores.size[3] == geometry.size[3]);
const int height = scores.size[2];
const int width = scores.size[3];