diff --git a/modules/cudaarithm/CMakeLists.txt b/modules/cudaarithm/CMakeLists.txt index b1aacd68696..fa9c9194163 100644 --- a/modules/cudaarithm/CMakeLists.txt +++ b/modules/cudaarithm/CMakeLists.txt @@ -9,7 +9,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4127 /wd4324 /wd4512 -Wundef -Wmissing-d set(extra_dependencies "") set(optional_dependencies "") if(ENABLE_CUDA_FIRST_CLASS_LANGUAGE) - if(UNIX AND NOT BUILD_SHARED_LIBS AND CUDA_VERSION_STRING VERSION_GREATER_EQUAL 9.2 AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.23) + if(UNIX AND NOT BUILD_SHARED_LIBS AND CUDA_VERSION_STRING VERSION_GREATER_EQUAL 9.2 AND CUDA_VERSION_STRING VERSION_LESS 13.0 AND CMAKE_VERSION VERSION_GREATER_EQUAL 3.23) set(CUDA_FFT_LIB_EXT "_static_nocallback") endif() list(APPEND extra_dependencies CUDA::cudart_static CUDA::nppial${CUDA_LIB_EXT} CUDA::nppc${CUDA_LIB_EXT} CUDA::nppitc${CUDA_LIB_EXT} CUDA::nppig${CUDA_LIB_EXT} CUDA::nppist${CUDA_LIB_EXT} CUDA::nppidei${CUDA_LIB_EXT}) diff --git a/modules/cudaarithm/src/cuda/absdiff_scalar.cu b/modules/cudaarithm/src/cuda/absdiff_scalar.cu index 0955e40c8b1..2a559daaed3 100644 --- a/modules/cudaarithm/src/cuda/absdiff_scalar.cu +++ b/modules/cudaarithm/src/cuda/absdiff_scalar.cu @@ -49,6 +49,7 @@ #else #include "opencv2/cudev.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" using namespace cv::cudev; @@ -56,6 +57,7 @@ void absDiffScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const G namespace { + using cv::cuda::device::compat::double4Compat; template struct AbsDiffScalarOp : unary_function { ScalarType val; @@ -114,7 +116,7 @@ void absDiffScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const G absDiffScalarImpl, absDiffScalarImpl, absDiffScalarImpl, absDiffScalarImpl }, { - absDiffScalarImpl, absDiffScalarImpl, absDiffScalarImpl, absDiffScalarImpl + absDiffScalarImpl, absDiffScalarImpl, absDiffScalarImpl, absDiffScalarImpl } }; diff --git a/modules/cudaarithm/src/cuda/add_scalar.cu 
b/modules/cudaarithm/src/cuda/add_scalar.cu index 92838a2a57d..af608a13d7d 100644 --- a/modules/cudaarithm/src/cuda/add_scalar.cu +++ b/modules/cudaarithm/src/cuda/add_scalar.cu @@ -49,6 +49,7 @@ #else #include "opencv2/cudev.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" using namespace cv::cudev; @@ -56,6 +57,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa namespace { + using cv::cuda::device::compat::double4Compat; template struct AddScalarOp : unary_function { ScalarType val; @@ -105,7 +107,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, - {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} + {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} }, { {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, @@ -114,7 +116,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, - {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} + {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} }, { {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, @@ -123,7 +125,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, - {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} + {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} }, { {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 
/*addScalarImpl*/}, @@ -132,7 +134,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, - {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} + {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} }, { {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, @@ -141,7 +143,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, - {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} + {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} }, { {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, @@ -150,7 +152,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl}, - {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} + {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} }, { {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, @@ -159,7 +161,7 @@ void addScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, {0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/, 0 /*addScalarImpl*/}, - {addScalarImpl, addScalarImpl, 
addScalarImpl, addScalarImpl} + {addScalarImpl, addScalarImpl, addScalarImpl, addScalarImpl} } }; diff --git a/modules/cudaarithm/src/cuda/div_scalar.cu b/modules/cudaarithm/src/cuda/div_scalar.cu index 97ada834104..eb10efef150 100644 --- a/modules/cudaarithm/src/cuda/div_scalar.cu +++ b/modules/cudaarithm/src/cuda/div_scalar.cu @@ -49,6 +49,7 @@ #else #include "opencv2/cudev.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" using namespace cv::cudev; @@ -56,6 +57,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G namespace { + using cv::cuda::device::compat::double4Compat; template struct SafeDiv; template struct SafeDiv { @@ -170,7 +172,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, - {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} + {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} }, { {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, @@ -179,7 +181,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, - {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} + {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} }, { {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, @@ -188,7 +190,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, - {divScalarImpl, divScalarImpl, divScalarImpl, 
divScalarImpl} + {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} }, { {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, @@ -197,7 +199,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, - {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} + {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} }, { {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, @@ -206,7 +208,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, - {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} + {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} }, { {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, @@ -215,7 +217,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl}, - {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} + {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} }, { {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, @@ -224,7 +226,7 @@ void divScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 
/*divScalarImpl*/, 0 /*divScalarImpl*/}, {0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/, 0 /*divScalarImpl*/}, - {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} + {divScalarImpl, divScalarImpl, divScalarImpl, divScalarImpl} } }; diff --git a/modules/cudaarithm/src/cuda/mul_scalar.cu b/modules/cudaarithm/src/cuda/mul_scalar.cu index f27ef26ddd7..c599aa03182 100644 --- a/modules/cudaarithm/src/cuda/mul_scalar.cu +++ b/modules/cudaarithm/src/cuda/mul_scalar.cu @@ -49,6 +49,7 @@ #else #include "opencv2/cudev.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" using namespace cv::cudev; @@ -56,6 +57,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa namespace { + using cv::cuda::device::compat::double4Compat; template struct MulScalarOp : unary_function { ScalarType val; @@ -102,7 +104,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, - {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} + {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} }, { {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, @@ -111,7 +113,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, - {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} + {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} }, { {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, @@ -120,7 +122,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, 
{mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, - {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} + {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} }, { {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, @@ -129,7 +131,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, - {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} + {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} }, { {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, @@ -138,7 +140,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, - {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} + {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} }, { {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, @@ -147,7 +149,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, bool, GpuMat& dst, const GpuMa {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl}, - {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} + {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} }, { {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, @@ -156,7 +158,7 @@ void mulScalar(const GpuMat& src, cv::Scalar val, 
bool, GpuMat& dst, const GpuMa {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, {0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/, 0 /*mulScalarImpl*/}, - {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} + {mulScalarImpl, mulScalarImpl, mulScalarImpl, mulScalarImpl} } }; diff --git a/modules/cudaarithm/src/cuda/sub_scalar.cu b/modules/cudaarithm/src/cuda/sub_scalar.cu index c4eeec01482..3d9c118fa34 100644 --- a/modules/cudaarithm/src/cuda/sub_scalar.cu +++ b/modules/cudaarithm/src/cuda/sub_scalar.cu @@ -49,6 +49,7 @@ #else #include "opencv2/cudev.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" using namespace cv::cudev; @@ -56,6 +57,8 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G namespace { + using cv::cuda::device::compat::double4Compat; + template struct SubScalarOp : unary_function { ScalarType val; @@ -128,7 +131,7 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, - {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} + {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} }, { {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, @@ -137,7 +140,7 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, - {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} + {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} }, { {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, @@ -146,7 
+149,7 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, - {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} + {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} }, { {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, @@ -155,7 +158,7 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, - {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} + {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} }, { {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, @@ -164,7 +167,7 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, - {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} + {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} }, { {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, @@ -173,7 +176,7 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl}, - {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} + {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} }, { {0 
/*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, @@ -182,7 +185,7 @@ void subScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const G {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, {0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/, 0 /*subScalarImpl*/}, - {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} + {subScalarImpl, subScalarImpl, subScalarImpl, subScalarImpl} } }; diff --git a/modules/cudalegacy/src/cuda/NCVPixelOperations.hpp b/modules/cudalegacy/src/cuda/NCVPixelOperations.hpp index fcebf576d98..b6e9e5defbc 100644 --- a/modules/cudalegacy/src/cuda/NCVPixelOperations.hpp +++ b/modules/cudalegacy/src/cuda/NCVPixelOperations.hpp @@ -46,6 +46,9 @@ #include #include #include "opencv2/cudalegacy/NCV.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" +using cv::cuda::device::compat::double4Compat; +using cv::cuda::device::compat::make_double4_compat; template inline __host__ __device__ TBase _pixMaxVal(); template<> inline __host__ __device__ Ncv8u _pixMaxVal() {return UCHAR_MAX;} @@ -101,7 +104,7 @@ template<> struct TConvBase2Vec {typedef float3 TVec;}; template<> struct TConvBase2Vec {typedef float4 TVec;}; template<> struct TConvBase2Vec {typedef double1 TVec;}; template<> struct TConvBase2Vec {typedef double3 TVec;}; -template<> struct TConvBase2Vec {typedef double4 TVec;}; +template<> struct TConvBase2Vec {typedef double4Compat TVec;}; //TODO: consider using CUDA intrinsics to avoid branching template inline __host__ __device__ void _TDemoteClampZ(Tin &a, Ncv8u &out) {out = (Ncv8u)CLAMP_0_255(a);} @@ -130,7 +133,7 @@ template<> inline __host__ __device__ float3 _pixMakeZero() {return make template<> inline __host__ __device__ float4 _pixMakeZero() {return make_float4(0.f,0.f,0.f,0.f);} template<> inline __host__ __device__ double1 _pixMakeZero() {return 
make_double1(0.);} template<> inline __host__ __device__ double3 _pixMakeZero() {return make_double3(0.,0.,0.);} -template<> inline __host__ __device__ double4 _pixMakeZero() {return make_double4(0.,0.,0.,0.);} +template<> inline __host__ __device__ double4Compat _pixMakeZero() {return make_double4_compat(0.,0.,0.,0.);} static inline __host__ __device__ uchar1 _pixMake(Ncv8u x) {return make_uchar1(x);} static inline __host__ __device__ uchar3 _pixMake(Ncv8u x, Ncv8u y, Ncv8u z) {return make_uchar3(x,y,z);} @@ -146,7 +149,7 @@ static inline __host__ __device__ float3 _pixMake(Ncv32f x, Ncv32f y, Ncv32f z) static inline __host__ __device__ float4 _pixMake(Ncv32f x, Ncv32f y, Ncv32f z, Ncv32f w) {return make_float4(x,y,z,w);} static inline __host__ __device__ double1 _pixMake(Ncv64f x) {return make_double1(x);} static inline __host__ __device__ double3 _pixMake(Ncv64f x, Ncv64f y, Ncv64f z) {return make_double3(x,y,z);} -static inline __host__ __device__ double4 _pixMake(Ncv64f x, Ncv64f y, Ncv64f z, Ncv64f w) {return make_double4(x,y,z,w);} +static inline __host__ __device__ double4Compat _pixMake(Ncv64f x, Ncv64f y, Ncv64f z, Ncv64f w) {return make_double4_compat(x,y,z,w);} template struct __pixDemoteClampZ_CN {static __host__ __device__ Tout _pixDemoteClampZ_CN(Tin &pix);}; @@ -329,13 +332,13 @@ template static __host__ __device__ Tout _pixDist(T template struct TAccPixWeighted; template<> struct TAccPixWeighted {typedef double1 type;}; template<> struct TAccPixWeighted {typedef double3 type;}; -template<> struct TAccPixWeighted {typedef double4 type;}; +template<> struct TAccPixWeighted {typedef double4Compat type;}; template<> struct TAccPixWeighted {typedef double1 type;}; template<> struct TAccPixWeighted {typedef double3 type;}; -template<> struct TAccPixWeighted {typedef double4 type;}; +template<> struct TAccPixWeighted {typedef double4Compat type;}; template<> struct TAccPixWeighted {typedef double1 type;}; template<> struct TAccPixWeighted {typedef double3 
type;}; -template<> struct TAccPixWeighted {typedef double4 type;}; +template<> struct TAccPixWeighted {typedef double4Compat type;}; template struct TAccPixDist {}; template<> struct TAccPixDist {typedef Ncv32u type;}; diff --git a/modules/cudalegacy/src/cuda/NCVPyramid.cu b/modules/cudalegacy/src/cuda/NCVPyramid.cu index c37b1a9e1dc..2bf36aeddfd 100644 --- a/modules/cudalegacy/src/cuda/NCVPyramid.cu +++ b/modules/cudalegacy/src/cuda/NCVPyramid.cu @@ -129,10 +129,10 @@ static __host__ __device__ float4 _average4_CN(const float4 &p00, const float4 & return out; }}; -template<> struct __average4_CN { -static __host__ __device__ double4 _average4_CN(const double4 &p00, const double4 &p01, const double4 &p10, const double4 &p11) +template<> struct __average4_CN { +static __host__ __device__ double4Compat _average4_CN(const double4Compat &p00, const double4Compat &p01, const double4Compat &p10, const double4Compat &p11) { - double4 out; + double4Compat out; out.x = (p00.x + p01.x + p10.x + p11.x) / 4; out.y = (p00.y + p01.y + p10.y + p11.y) / 4; out.z = (p00.z + p01.z + p10.z + p11.z) / 4; diff --git a/modules/cudev/include/opencv2/cudev/util/detail/type_traits.hpp b/modules/cudev/include/opencv2/cudev/util/detail/type_traits.hpp index 91e47362f94..d43d93c0f97 100644 --- a/modules/cudev/include/opencv2/cudev/util/detail/type_traits.hpp +++ b/modules/cudev/include/opencv2/cudev/util/detail/type_traits.hpp @@ -47,11 +47,14 @@ #define OPENCV_CUDEV_UTIL_TYPE_TRAITS_DETAIL_HPP #include "../../common.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" namespace cv { namespace cudev { namespace type_traits_detail { + using cv::cuda::device::compat::double4; + template struct IsSignedIntergral { enum {value = 0}; }; template <> struct IsSignedIntergral { enum {value = 1}; }; template <> struct IsSignedIntergral { enum {value = 1}; }; diff --git a/modules/cudev/include/opencv2/cudev/util/vec_math.hpp b/modules/cudev/include/opencv2/cudev/util/vec_math.hpp index 
f6d8d2cda41..de389041f07 100644 --- a/modules/cudev/include/opencv2/cudev/util/vec_math.hpp +++ b/modules/cudev/include/opencv2/cudev/util/vec_math.hpp @@ -48,9 +48,12 @@ #include "vec_traits.hpp" #include "saturate_cast.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" namespace cv { namespace cudev { + using cv::cuda::device::compat::double4; + //! @addtogroup cudev //! @{ diff --git a/modules/cudev/include/opencv2/cudev/util/vec_traits.hpp b/modules/cudev/include/opencv2/cudev/util/vec_traits.hpp index 92f360c889b..3fdd8f4a846 100644 --- a/modules/cudev/include/opencv2/cudev/util/vec_traits.hpp +++ b/modules/cudev/include/opencv2/cudev/util/vec_traits.hpp @@ -47,8 +47,14 @@ #define OPENCV_CUDEV_UTIL_VEC_TRAITS_HPP #include "../common.hpp" +#include "opencv2/core/cuda/cuda_compat.hpp" -namespace cv { namespace cudev { +namespace cv { + + using cv::cuda::device::compat::double4; + using cv::cuda::device::compat::make_double4; + + namespace cudev { //! @addtogroup cudev //! @{ diff --git a/modules/gapi/include/opencv2/gapi/core.hpp b/modules/gapi/include/opencv2/gapi/core.hpp index 60bb2c50745..c312e47f3db 100644 --- a/modules/gapi/include/opencv2/gapi/core.hpp +++ b/modules/gapi/include/opencv2/gapi/core.hpp @@ -1796,7 +1796,7 @@ contains a 0-based cluster index for the \f$i^{th}\f$ sample. @note - Function textual ID is "org.opencv.core.kmeansND" - - In case of an N-dimentional points' set given, input GMat can have the following traits: + - In case of an N-dimensional points' set given, input GMat can have the following traits: 2 dimensions, a single row or column if there are N channels, or N columns if there is a single channel. Mat should have @ref CV_32F depth. 
- Although, if GMat with height != 1, width != 1, channels != 1 given as data, n-dimensional @@ -1806,7 +1806,7 @@ samples are considered given in amount of A, where A = height, n = width * chann width = 1, height = A, where A is samples amount, or width = bestLabels.width, height = bestLabels.height if bestLabels given; - the cluster centers are returned as 1-channel GMat with sizes -width = n, height = K, where n is samples' dimentionality and K is clusters' amount. +width = n, height = K, where n is samples' dimensionality and K is clusters' amount. - As one of possible usages, if you want to control the initial labels for each attempt by yourself, you can utilize just the core of the function. To do that, set the number of attempts to 1, initialize labels each time using a custom algorithm, pass them with the @@ -1814,7 +1814,7 @@ of attempts to 1, initialize labels each time using a custom algorithm, pass the @param data Data for clustering. An array of N-Dimensional points with float coordinates is needed. Function can take GArray, GArray for 2D and 3D cases or GMat for any -dimentionality and channels. +dimensionality and channels. @param K Number of clusters to split the set by. @param bestLabels Optional input integer array that can store the supposed initial cluster indices for every sample. Used when ( flags = #KMEANS_USE_INITIAL_LABELS ) flag is set. 
diff --git a/modules/xobjdetect/src/hog.cpp b/modules/xobjdetect/src/hog.cpp index 3504af28e45..8c889f21d0f 100644 --- a/modules/xobjdetect/src/hog.cpp +++ b/modules/xobjdetect/src/hog.cpp @@ -1668,9 +1668,16 @@ class HOGInvoker : Size sz(cvRound(img.cols/scale), cvRound(img.rows/scale)); Mat smallerImg(sz, img.type(), smallerImgBuf.ptr()); if( sz == img.size() ) + { smallerImg = Mat(sz, img.type(), img.data, img.step); + } else - resize(img, smallerImg, sz, 0, 0, INTER_LINEAR_EXACT); + { + if(getDefaultAlgorithmHint() == ALGO_HINT_APPROX) + resize(img, smallerImg, sz, 0, 0, INTER_LINEAR); + else + resize(img, smallerImg, sz, 0, 0, INTER_LINEAR_EXACT); + } hog->detect(smallerImg, locations, hitsWeights, hitThreshold, winStride, padding); Size scaledWinSize = Size(cvRound(hog->winSize.width*scale), cvRound(hog->winSize.height*scale));