diff --git a/modules/bgsegm/include/opencv2/bgsegm.hpp b/modules/bgsegm/include/opencv2/bgsegm.hpp index 1d2b6f892f2..6c894f8fb2f 100644 --- a/modules/bgsegm/include/opencv2/bgsegm.hpp +++ b/modules/bgsegm/include/opencv2/bgsegm.hpp @@ -62,6 +62,32 @@ The class implements the algorithm described in @cite KB2001 . class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor { public: + // BackgroundSubtractor interface + /** @brief Computes a foreground mask. + + @param image Next video frame of type CV_8UC(n),CV_8SC(n),CV_16UC(n),CV_16SC(n),CV_32SC(n),CV_32FC(n),CV_64FC(n), where n is 1,2,3,4. + @param fgmask The output foreground mask as an 8-bit binary image. + @param learningRate The value between 0 and 1 that indicates how fast the background model is + learnt. Negative parameter value makes the algorithm to use some automatically chosen learning + rate. 0 means that the background model is not updated at all, 1 means that the background model + is completely reinitialized from the last frame. + */ + + CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + + /** @brief Computes a foreground mask and skips known foreground in evaluation. + + @param image Next video frame of type CV_8UC(n),CV_8SC(n),CV_16UC(n),CV_16SC(n),CV_32SC(n),CV_32FC(n),CV_64FC(n), where n is 1,2,3,4. + @param fgmask The output foreground mask as an 8-bit binary image. + @param knownForegroundMask Mask of pixels that are already known to be foreground, allowing the model to skip learning from them. + @param learningRate The value between 0 and 1 that indicates how fast the background model is + learnt. Negative parameter value makes the algorithm to use some automatically chosen learning + rate. 0 means that the background model is not updated at all, 1 means that the background model + is completely reinitialized from the last frame. + */ + + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + CV_WRAP virtual int getHistory() const = 0; CV_WRAP virtual void setHistory(int nframes) = 0; @@ -110,6 +136,22 @@ class CV_EXPORTS_W BackgroundSubtractorGMG : public BackgroundSubtractor is completely reinitialized from the last frame. */ CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + + /** @brief Computes a foreground mask with known foreground mask input. + + @param image Next video frame. + @param fgmask The output foreground mask as an 8-bit binary image. + @param knownForegroundMask Mask of pixels that are already known to be foreground. + @param learningRate The value between 0 and 1 that indicates how fast the background model is + learnt. Negative parameter value makes the algorithm to use some automatically chosen learning + rate. 0 means that the background model is not updated at all, 1 means that the background model + is completely reinitialized from the last frame. + + @note Not all background subtractors support known-foreground masking. If a subtractor does not + support it, the mask is ignored, a warning is logged, and the method falls back to apply() without a mask. + */ + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0; /** @brief Returns total number of distinct colors to maintain in histogram. 
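// A minimal usage sketch of the new known-foreground apply() overload declared above.
// Only the apply(image, knownForegroundMask, fgmask, learningRate) signature and
// createBackgroundSubtractorMOG() come from the bgsegm module/this patch; the capture
// source and the all-zeros mask below are illustrative assumptions, not part of the change.
#include <opencv2/bgsegm.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::Ptr<cv::bgsegm::BackgroundSubtractorMOG> mog = cv::bgsegm::createBackgroundSubtractorMOG();
    cv::VideoCapture cap(0);
    cv::Mat frame, fgmask;
    while (cap.read(frame))
    {
        // 8-bit mask of pixels already known to be foreground; subtractors that support it
        // can exclude these pixels when updating the background model.
        cv::Mat knownFg = cv::Mat::zeros(frame.size(), CV_8UC1);
        mog->apply(frame, knownFg, fgmask, -1 /* automatically chosen learning rate */);
    }
    return 0;
}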
@@ -210,6 +252,22 @@ class CV_EXPORTS_W BackgroundSubtractorCNT : public BackgroundSubtractor public: // BackgroundSubtractor interface CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + + /** @brief Computes a foreground mask with known foreground mask input. + + @param image Next video frame. + @param knownForegroundMask Mask of pixels that are already known to be foreground. + @param fgmask The output foreground mask as an 8-bit binary image. + @param learningRate The value between 0 and 1 that indicates how fast the background model is + learnt. Negative parameter value makes the algorithm to use some automatically chosen learning + rate. 0 means that the background model is not updated at all, 1 means that the background model + is completely reinitialized from the last frame. + + @note Not all background subtractors support known-foreground masking. If a subtractor does not + support it, the mask is ignored, a warning is logged, and the method falls back to apply() without a mask. + */ + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0; /** @brief Returns number of frames with same pixel color to consider stable. @@ -269,6 +327,7 @@ class CV_EXPORTS_W BackgroundSubtractorGSOC : public BackgroundSubtractor public: // BackgroundSubtractor interface CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0; }; @@ -280,6 +339,7 @@ class CV_EXPORTS_W BackgroundSubtractorLSBP : public BackgroundSubtractor public: // BackgroundSubtractor interface CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE = 0; CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE = 0; }; diff --git a/modules/bgsegm/src/bgfg_gaussmix.cpp b/modules/bgsegm/src/bgfg_gaussmix.cpp index d6c167689db..6fafe999e71 100644 --- a/modules/bgsegm/src/bgfg_gaussmix.cpp +++ b/modules/bgsegm/src/bgfg_gaussmix.cpp @@ -42,6 +42,7 @@ #include "precomp.hpp" #include +#include "opencv2/core/utils/logger.hpp" // to make sure we can use these short names #undef K @@ -104,6 +105,8 @@ class BackgroundSubtractorMOGImpl CV_FINAL : public BackgroundSubtractorMOG //! the update operator virtual void apply(InputArray image, OutputArray fgmask, double learningRate=0) CV_OVERRIDE; + virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate) CV_OVERRIDE; + //! 
re-initiaization method virtual void initialize(Size _frameSize, int _frameType) { @@ -461,6 +464,15 @@ void BackgroundSubtractorMOGImpl::apply(InputArray _image, OutputArray _fgmask, CV_Error( Error::StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" ); } +void BackgroundSubtractorMOGImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + Mat knownForegroundMask = _knownForegroundMask.getMat(); + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate); +} + Ptr createBackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma) { diff --git a/modules/bgsegm/src/bgfg_gmg.cpp b/modules/bgsegm/src/bgfg_gmg.cpp index a83a83cc696..9ec5101a736 100644 --- a/modules/bgsegm/src/bgfg_gmg.cpp +++ b/modules/bgsegm/src/bgfg_gmg.cpp @@ -51,6 +51,7 @@ #include "precomp.hpp" #include "opencv2/core/utility.hpp" #include +#include "opencv2/core/utils/logger.hpp" namespace cv { @@ -97,6 +98,7 @@ class BackgroundSubtractorGMGImpl CV_FINAL : public BackgroundSubtractorGMG * @param fgmask Output mask image representing foreground and background pixels */ virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1.0) CV_OVERRIDE; + virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate) CV_OVERRIDE; /** * Releases all inner buffers. @@ -473,6 +475,15 @@ void BackgroundSubtractorGMGImpl::apply(InputArray _frame, OutputArray _fgmask, ++frameNum_; } +void BackgroundSubtractorGMGImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double newLearningRate){ + Mat knownForegroundMask = _knownForegroundMask.getMat(); + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, newLearningRate); +} + void BackgroundSubtractorGMGImpl::release() { frameSize_ = Size(); diff --git a/modules/bgsegm/src/bgfg_gsoc.cpp b/modules/bgsegm/src/bgfg_gsoc.cpp index 0bec405f7a9..d4fbb1d03fe 100644 --- a/modules/bgsegm/src/bgfg_gsoc.cpp +++ b/modules/bgsegm/src/bgfg_gsoc.cpp @@ -53,6 +53,7 @@ #include #include #include "opencv2/core/cvdef.h" +#include "opencv2/core/utils/logger.hpp" namespace cv { @@ -494,6 +495,7 @@ class BackgroundSubtractorGSOCImpl CV_FINAL : public BackgroundSubtractorGSOC { float noiseRemovalThresholdFacFG); CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate = -1) CV_OVERRIDE; + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate) CV_OVERRIDE; CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE; @@ -542,6 +544,7 @@ class BackgroundSubtractorLSBPImpl CV_FINAL : public BackgroundSubtractorLSBP { ); CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate = -1) CV_OVERRIDE; + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate) CV_OVERRIDE; CV_WRAP virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE; @@ -793,6 +796,15 @@ void 
BackgroundSubtractorGSOCImpl::apply(InputArray _image, OutputArray _fgmask, this->postprocessing(fgMask); } +void BackgroundSubtractorGSOCImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + Mat knownForegroundMask = _knownForegroundMask.getMat(); + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate); +} + void BackgroundSubtractorGSOCImpl::getBackgroundImage(OutputArray _backgroundImage) const { CV_Assert(!backgroundModel.empty()); const Size sz = backgroundModel->getSize(); @@ -928,6 +940,15 @@ void BackgroundSubtractorLSBPImpl::apply(InputArray _image, OutputArray _fgmask, this->postprocessing(fgMask); } +void BackgroundSubtractorLSBPImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + Mat knownForegroundMask = _knownForegroundMask.getMat(); + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate); +} + void BackgroundSubtractorLSBPImpl::getBackgroundImage(OutputArray _backgroundImage) const { CV_Assert(!backgroundModel.empty()); const Size sz = backgroundModel->getSize(); diff --git a/modules/bgsegm/src/bgfg_subcnt.cpp b/modules/bgsegm/src/bgfg_subcnt.cpp index 1638de50e62..c46111f6a40 100644 --- a/modules/bgsegm/src/bgfg_subcnt.cpp +++ b/modules/bgsegm/src/bgfg_subcnt.cpp @@ -44,6 +44,7 @@ #include "precomp.hpp" #include +#include "opencv2/core/utils/logger.hpp" namespace cv { @@ -61,6 +62,8 @@ class BackgroundSubtractorCNTImpl CV_FINAL : public BackgroundSubtractorCNT // BackgroundSubtractor interface virtual void apply(InputArray image, OutputArray fgmask, double learningRate) CV_OVERRIDE; + virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate) CV_OVERRIDE; + virtual void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE; int getMinPixelStability() const CV_OVERRIDE; @@ -409,6 +412,14 @@ void BackgroundSubtractorCNTImpl::apply(InputArray image, OutputArray _fgmask, d prevFrame = frame; } +void BackgroundSubtractorCNTImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + Mat knownForegroundMask = _knownForegroundMask.getMat(); + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate); +} Ptr createBackgroundSubtractorCNT(int minPixelStability, bool useHistory, int maxStability, bool isParallel) { diff --git a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp index eb5467e9b87..fa05913e51d 100644 --- a/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp +++ b/modules/cudabgsegm/include/opencv2/cudabgsegm.hpp @@ -84,6 +84,8 @@ class CV_EXPORTS_W BackgroundSubtractorMOG : public cv::BackgroundSubtractor using cv::BackgroundSubtractor::apply; CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0; + CV_WRAP virtual void apply(InputArray image, InputArray 
knownForegroundMask, OutputArray fgmask, double learningRate, Stream& stream) = 0; + using cv::BackgroundSubtractor::getBackgroundImage; virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0; @@ -135,6 +137,8 @@ class CV_EXPORTS_W BackgroundSubtractorMOG2 : public cv::BackgroundSubtractorMOG CV_WRAP virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0; + CV_WRAP virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate, Stream& stream) = 0; + virtual void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const = 0; CV_WRAP inline void getBackgroundImage(CV_OUT GpuMat &backgroundImage, Stream& stream) { diff --git a/modules/cudabgsegm/src/mog.cpp b/modules/cudabgsegm/src/mog.cpp index 8a43293d43a..1c0506ba7c9 100644 --- a/modules/cudabgsegm/src/mog.cpp +++ b/modules/cudabgsegm/src/mog.cpp @@ -41,6 +41,7 @@ //M*/ #include "precomp.hpp" +#include "opencv2/core/utils/logger.hpp" using namespace cv; using namespace cv::cuda; @@ -79,6 +80,9 @@ namespace void apply(InputArray image, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE; void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) CV_OVERRIDE; + void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1) CV_OVERRIDE; + void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate, Stream& stream) CV_OVERRIDE; + void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE; void getBackgroundImage(OutputArray backgroundImage, Stream& stream) const CV_OVERRIDE; @@ -131,6 +135,22 @@ namespace apply(image, fgmask, learningRate, Stream::Null()); } + void MOGImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate, Stream::Null()); + } + + void MOGImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate, Stream &stream){ + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate, stream); + } + void MOGImpl::apply(InputArray _frame, OutputArray _fgmask, double learningRate, Stream& stream) { using namespace cv::cuda::device::mog; diff --git a/modules/cudabgsegm/src/mog2.cpp b/modules/cudabgsegm/src/mog2.cpp index 47135a088ba..f4edc9b6216 100644 --- a/modules/cudabgsegm/src/mog2.cpp +++ b/modules/cudabgsegm/src/mog2.cpp @@ -42,6 +42,7 @@ #include "precomp.hpp" #include "cuda/mog2.hpp" +#include "opencv2/core/utils/logger.hpp" using namespace cv; using namespace cv::cuda; @@ -83,6 +84,9 @@ class MOG2Impl CV_FINAL : public cuda::BackgroundSubtractorMOG2 void apply(InputArray image, OutputArray fgmask, double learningRate = -1) CV_OVERRIDE; void apply(InputArray image, OutputArray fgmask, double learningRate, Stream &stream) CV_OVERRIDE; + void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate = -1) CV_OVERRIDE; + void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double 
learningRate, Stream& stream) CV_OVERRIDE; + void getBackgroundImage(OutputArray backgroundImage) const CV_OVERRIDE; void getBackgroundImage(OutputArray backgroundImage, Stream &stream) const CV_OVERRIDE; @@ -174,6 +178,22 @@ void MOG2Impl::apply(InputArray image, OutputArray fgmask, double learningRate) apply(image, fgmask, learningRate, Stream::Null()); } +void MOG2Impl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + if(!_knownForegroundMask.empty()) + { + CV_Error( Error::StsNotImplemented, "Known Foreground Masking has not been implemented for this specific background subtractor"); + } + apply(_image, _fgmask, learningRate, Stream::Null()); +} + +void MOG2Impl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate, Stream &stream){ + if(!_knownForegroundMask.empty()) + { + CV_Error( Error::StsNotImplemented, "Known Foreground Masking has not been implemented for this specific background subtractor"); + } + apply(_image, _fgmask, learningRate, stream); +} + void MOG2Impl::apply(InputArray _frame, OutputArray _fgmask, double learningRate, Stream &stream) { using namespace cv::cuda::device::mog2; diff --git a/modules/cudalegacy/include/opencv2/cudalegacy.hpp b/modules/cudalegacy/include/opencv2/cudalegacy.hpp index 8230eaa2171..ec8ea72eca2 100644 --- a/modules/cudalegacy/include/opencv2/cudalegacy.hpp +++ b/modules/cudalegacy/include/opencv2/cudalegacy.hpp @@ -92,6 +92,7 @@ class CV_EXPORTS BackgroundSubtractorGMG : public cv::BackgroundSubtractor public: using cv::BackgroundSubtractor::apply; virtual void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream) = 0; + virtual void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate, Stream& stream) = 0; virtual int getMaxFeatures() const = 0; virtual void setMaxFeatures(int maxFeatures) = 0; diff --git a/modules/cudalegacy/src/fgd.cpp b/modules/cudalegacy/src/fgd.cpp index c75594d1ad2..2882164c8a1 100644 --- a/modules/cudalegacy/src/fgd.cpp +++ b/modules/cudalegacy/src/fgd.cpp @@ -54,6 +54,8 @@ Ptr cv::cuda::createBackgroundSubtractorFGD(const #else #include "cuda/fgd.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/core/utils/logger.hpp" ///////////////////////////////////////////////////////////////////////// // FGDParams @@ -539,6 +541,7 @@ namespace ~FGDImpl(); void apply(InputArray image, OutputArray fgmask, double learningRate=-1); + void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1); void getBackgroundImage(OutputArray backgroundImage) const; @@ -581,6 +584,14 @@ namespace { } + void FGDImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate); + } + void FGDImpl::apply(InputArray _frame, OutputArray fgmask, double) { GpuMat curFrame = _frame.getGpuMat(); diff --git a/modules/cudalegacy/src/gmg.cpp b/modules/cudalegacy/src/gmg.cpp index a982d8689bf..28f9cf4fa4a 100644 --- a/modules/cudalegacy/src/gmg.cpp +++ b/modules/cudalegacy/src/gmg.cpp @@ -41,6 +41,7 @@ //M*/ #include "precomp.hpp" +#include 
"opencv2/core/utils/logger.hpp" using namespace cv; using namespace cv::cuda; @@ -73,6 +74,10 @@ namespace void apply(InputArray image, OutputArray fgmask, double learningRate=-1); void apply(InputArray image, OutputArray fgmask, double learningRate, Stream& stream); + // Overloaded Background Subtractor Applys featuring knownForegroundMask parameter + void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate=-1); + void apply(InputArray image, InputArray knownForegroundMask, OutputArray fgmask, double learningRate, Stream& stream); + void getBackgroundImage(OutputArray backgroundImage) const; int getMaxFeatures() const { return maxFeatures_; } @@ -165,6 +170,22 @@ namespace apply(image, fgmask, learningRate, Stream::Null()); } + void GMGImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate){ + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate); + } + + void GMGImpl::apply(InputArray _image, InputArray _knownForegroundMask, OutputArray _fgmask, double learningRate, Stream& stream){ + if(!_knownForegroundMask.empty()) + { + CV_LOG_WARNING(NULL, "Known Foreground Masking has not been implemented for this specific background subtractor, falling back to subtraction without known foreground"); + } + apply(_image, _fgmask, learningRate, stream); + } + void GMGImpl::apply(InputArray _frame, OutputArray _fgmask, double newLearningRate, Stream& stream) { using namespace cv::cuda::device::gmg; diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index 9f5ac94d74c..67545dda902 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -66,6 +66,7 @@ file(GLOB gapi_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/onevpl/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp" + "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/pysrc/*.hpp" ) set(gapi_srcs @@ -242,6 +243,9 @@ set(gapi_srcs src/streaming/gstreamer/gstreamer_media_adapter.cpp src/streaming/gstreamer/gstreamerenv.cpp + # Python Custom Stream source + src/pysrc/python_stream_source.cpp + # Utils (ITT tracing) src/utils/itt.cpp ) diff --git a/modules/gapi/include/opencv2/gapi/pysrc/python_stream_source.hpp b/modules/gapi/include/opencv2/gapi/pysrc/python_stream_source.hpp new file mode 100644 index 00000000000..b43dea53053 --- /dev/null +++ b/modules/gapi/include/opencv2/gapi/pysrc/python_stream_source.hpp @@ -0,0 +1,65 @@ +#ifndef OPENCV_GAPI_PYSRC_PYTHONSTREAMSOURCE_HPP +#define OPENCV_GAPI_PYSRC_PYTHONSTREAMSOURCE_HPP +#include +#include + +namespace cv { +namespace gapi { +namespace wip { + +/** + * @brief Creates a G-API IStreamSource that delegates to a Python-defined source. + * + * This factory function wraps a Python object (for example, an instance of a class + * implementing a `pull()` and a `descr_of()` method) into a `cv::gapi::wip::IStreamSource`, + * enabling it to be used within a G-API computation graph. The OpenCV Python bindings + * automatically convert the PyObject into a `cv::Ptr`. + * + * @param src + * A `cv::Ptr` that internally holds the original Python object. + * + * @return + * A `cv::Ptr` that wraps the provided Python object. 
On each frame pull, + G-API will: + - Acquire the Python GIL + - Call the Python object’s `pull()` method + - Convert the resulting NumPy array to a `cv::Mat` + - Pass the `cv::Mat` into the G-API pipeline + * + * @note + * In Python, you can use `make_py_src` as follows: + * + * @code{.py} + * class MyClass: + * def __init__(self): + * # Initialize your source + * def pull(self): + * # Return the next frame as a numpy.ndarray or None for end-of-stream + * def descr_of(self): + * # Return a numpy.ndarray that describes the format of the frames + * + * # Create a G-API source from a Python class + * py_src = cv.gapi.wip.make_py_src(MyClass()) + * + * # Define a simple graph: input → copy → output + * g_in = cv.GMat() + * g_out = cv.gapi.copy(g_in) + * graph = cv.GComputation(g_in, g_out) + * + * # Compile the pipeline for streaming and assign the source + * pipeline = graph.compileStreaming() + * pipeline.setSource([py_src]) + * pipeline.start() + * @endcode + */ + +CV_EXPORTS_W cv::Ptr +make_py_src(const cv::Ptr& src); + + +} // namespace wip +} // namespace gapi +} // namespace cv + + +#endif // OPENCV_GAPI_PYSRC_PYTHONSTREAMSOURCE_HPP diff --git a/modules/gapi/misc/python/pyopencv_gapi.hpp b/modules/gapi/misc/python/pyopencv_gapi.hpp index 66c3910756b..6dcf3081e30 100644 --- a/modules/gapi/misc/python/pyopencv_gapi.hpp +++ b/modules/gapi/misc/python/pyopencv_gapi.hpp @@ -1162,6 +1162,161 @@ bool pyopencv_to(PyObject* obj, cv::GProtoOutputArgs& value, const ArgInfo& info } } +namespace cv { +namespace gapi { +namespace wip { + +/** + * @class PythonCustomStreamSource + * @brief Wraps a Python-defined frame source as an IStreamSource for G-API. + * + * This class allows a G-API pipeline to pull frames from a Python object. The Python object + * must implement a `pull()` method, which must return a `numpy.ndarray` containing + * the next frame, or `None` to signal end-of-stream. It should also implement a `descr_of()` + * method which must return a `numpy.ndarray` that describes the format + * (data type, number of channels, height, width) of the frames produced. 
+ */ + +class PythonCustomStreamSource : public IStreamSource +{ + public: + PythonCustomStreamSource(PyObject* _obj = nullptr) : obj(_obj) + { + if (obj) + Py_INCREF(obj); + } + + ~PythonCustomStreamSource() + { + if (obj) + Py_DECREF(obj); + } + + bool pull(cv::gapi::wip::Data& data) CV_OVERRIDE + { + if (!obj) + return false; + + PyObject* src = reinterpret_cast(obj); + + PyGILState_STATE gstate; + gstate = PyGILState_Ensure(); + + PyObject* result = PyObject_CallMethodObjArgs(src, PyUnicode_FromString("pull"), NULL); + bool hasPyPullError = PyErr_Occurred() != nullptr; + + if (!result) + { + PyErr_Print(); + PyGILState_Release(gstate); + CV_Error(cv::Error::StsError, "PythonCustomStreamSource::pull(): call to .pull() failed"); + } + + if (result == Py_None) + { + Py_DECREF(result); + PyGILState_Release(gstate); + return false; + } + + if (!PyArray_Check(result)) + { + PyErr_Format(PyExc_TypeError, "Expected numpy.ndarray from .pull()"); + PyErr_Print(); + Py_DECREF(result); + PyGILState_Release(gstate); + CV_Error(cv::Error::StsError, "PythonCustomStreamSource::pull(): .pull() did not return a numpy.ndarray"); + } + + cv::Mat mat; + ArgInfo info("pull return", 0); + if (!pyopencv_to(result, mat, info) || PyErr_Occurred()) + { + PyErr_Print(); + Py_DECREF(result); + PyGILState_Release(gstate); + CV_Error(cv::Error::StsError, "PythonCustomStreamSource::pull(): failed to convert numpy to cv::Mat"); + } + + if (mat.empty()) + { + Py_DECREF(result); + PyGILState_Release(gstate); + return false; + } + + data = mat; + Py_DECREF(result); + PyGILState_Release(gstate); + + if (hasPyPullError) + CV_Error(cv::Error::StsError, "Python .pull() call error"); + + return true; + } + + GMetaArg descr_of() const CV_OVERRIDE + { + if (!obj) + return cv::GMetaArg(cv::GFrameDesc{cv::MediaFormat::BGR, cv::Size(640, 480)}); + + PyGILState_STATE gstate = PyGILState_Ensure(); + PyObject* result = PyObject_CallMethodObjArgs(obj, PyUnicode_FromString("descr_of"), NULL); + if (!result) + { + PyErr_Print(); + PyGILState_Release(gstate); + CV_Error(cv::Error::StsError, "PythonCustomStreamSource::descr_of(): conversion error"); + } + + if (!PyArray_Check(result)) { + PyErr_Format(PyExc_TypeError, "Expected numpy.ndarray from .descr_of()"); + PyErr_Print(); + Py_DECREF(result); + PyGILState_Release(gstate); + CV_Error(cv::Error::StsError, "PythonCustomStreamSource::descr_of(): did not return a numpy.ndarray"); + } + + cv::Mat mat; + ArgInfo info("descr_of return", 0); + if (!pyopencv_to(result, mat, info)) + { + PyErr_Print(); + Py_DECREF(result); + PyGILState_Release(gstate); + CV_Error(cv::Error::StsError, "PythonCustomStreamSource::descr_of(): conversion error"); + } + + Py_DECREF(result); + PyGILState_Release(gstate); + cv::GMatDesc mdesc = cv::descr_of(mat); + + return cv::GMetaArg(mdesc); + } + +private: + PyObject* obj; +}; + +inline cv::Ptr make_pysrc_from_pyobject(PyObject* obj) +{ + return cv::makePtr(obj); +} + +} // namespace wip +} // namespace gapi +} // namespace cv + +template<> +bool pyopencv_to(PyObject* obj, cv::Ptr& p, const ArgInfo&) +{ + if (!obj) + return false; + + p = cv::makePtr(obj); + return true; +} + // extend cv.gapi methods #define PYOPENCV_EXTRA_METHODS_GAPI \ {"kernels", CV_PY_FN_WITH_KW(pyopencv_cv_gapi_kernels), "kernels(...) 
-> GKernelPackage"}, \ diff --git a/modules/gapi/misc/python/test/test_gapi_streaming.py b/modules/gapi/misc/python/test/test_gapi_streaming.py index 7f9de5f767f..2d3f8f27858 100644 --- a/modules/gapi/misc/python/test/test_gapi_streaming.py +++ b/modules/gapi/misc/python/test/test_gapi_streaming.py @@ -539,7 +539,38 @@ def test_gst_multiple_sources_accuracy(self): self.assertEqual(0.0, cv.norm(convertNV12p2BGR(expected1), actual1, cv.NORM_INF)) self.assertEqual(0.0, cv.norm(convertNV12p2BGR(expected2), actual2, cv.NORM_INF)) + def test_python_custom_stream_source(self): + class MySource: + def __init__(self): + self.count = 0 + def pull(self): + if self.count >= 3: + return None + self.count += 1 + return np.ones((10, 10, 3), np.uint8) * self.count + + def descr_of(self): + return np.zeros((10, 10, 3), np.uint8) + + g_in = cv.GMat() + g_out = cv.gapi.copy(g_in) + c = cv.GComputation(g_in, g_out) + + comp = c.compileStreaming() + + src = cv.gapi.wip.make_py_src(MySource()) + comp.setSource([src]) + comp.start() + + frames = [] + while True: + has_frame, frame = comp.pull() + if not has_frame: + break + frames.append(frame) + + self.assertEqual(len(frames), 3) except unittest.SkipTest as e: diff --git a/modules/gapi/src/api/s11n.cpp b/modules/gapi/src/api/s11n.cpp index 989cc791185..b86cd8f278c 100644 --- a/modules/gapi/src/api/s11n.cpp +++ b/modules/gapi/src/api/s11n.cpp @@ -141,5 +141,4 @@ cv::GRunArg cv::gapi::bind(cv::GRunArgP &out) GAPI_Error("This value type is UNKNOWN!"); break; } - return cv::GRunArg(); } diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp index 89f0aa9ed1c..40e91210c50 100644 --- a/modules/gapi/src/backends/common/serialization.cpp +++ b/modules/gapi/src/backends/common/serialization.cpp @@ -206,22 +206,20 @@ IOStream& operator<< (IOStream& os, const cv::RMat& mat) { mat.serialize(os); return os; } -IIStream& operator>> (IIStream& is, cv::RMat&) { +IIStream& operator>> (IIStream&, cv::RMat&) { util::throw_error(std::logic_error("operator>> for RMat should never be called. " "Instead, cv::gapi::deserialize() " "should be used")); - return is; } IOStream& operator<< (IOStream& os, const cv::MediaFrame &frame) { frame.serialize(os); return os; } -IIStream& operator>> (IIStream& is, cv::MediaFrame &) { +IIStream& operator>> (IIStream&, cv::MediaFrame &) { util::throw_error(std::logic_error("operator>> for MediaFrame should never be called. 
" "Instead, cv::gapi::deserialize() " "should be used")); - return is; } namespace @@ -395,27 +393,23 @@ IOStream& operator<< (IOStream& os, const cv::GArrayDesc &) {return os;} IIStream& operator>> (IIStream& is, cv::GArrayDesc &) {return is;} #if !defined(GAPI_STANDALONE) -IOStream& operator<< (IOStream& os, const cv::UMat &) +IOStream& operator<< (IOStream&, const cv::UMat &) { GAPI_Error("Serialization: Unsupported << for UMat"); - return os; } -IIStream& operator >> (IIStream& is, cv::UMat &) +IIStream& operator >> (IIStream&, cv::UMat &) { GAPI_Error("Serialization: Unsupported >> for UMat"); - return is; } #endif // !defined(GAPI_STANDALONE) -IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &) +IOStream& operator<< (IOStream&, const cv::gapi::wip::IStreamSource::Ptr &) { GAPI_Error("Serialization: Unsupported << for IStreamSource::Ptr"); - return os; } -IIStream& operator >> (IIStream& is, cv::gapi::wip::IStreamSource::Ptr &) +IIStream& operator >> (IIStream&, cv::gapi::wip::IStreamSource::Ptr &) { - GAPI_Assert("Serialization: Unsupported >> for IStreamSource::Ptr"); - return is; + GAPI_Error("Serialization: Unsupported >> for IStreamSource::Ptr"); } namespace diff --git a/modules/gapi/src/backends/fluid/gfluidbackend.cpp b/modules/gapi/src/backends/fluid/gfluidbackend.cpp index d24dcd599a9..f363c6165c3 100644 --- a/modules/gapi/src/backends/fluid/gfluidbackend.cpp +++ b/modules/gapi/src/backends/fluid/gfluidbackend.cpp @@ -313,7 +313,7 @@ static int maxLineConsumption(const cv::GFluidKernel::Kind kind, int window, int } } break; case cv::GFluidKernel::Kind::YUV420toRGB: return inPort == 0 ? 2 : 1; break; - default: GAPI_Error("InternalError"); return 0; + default: GAPI_Error("InternalError"); } } @@ -325,7 +325,7 @@ static int borderSize(const cv::GFluidKernel::Kind kind, int window) // Resize never reads from border pixels case cv::GFluidKernel::Kind::Resize: return 0; break; case cv::GFluidKernel::Kind::YUV420toRGB: return 0; break; - default: GAPI_Error("InternalError"); return 0; + default: GAPI_Error("InternalError"); } } diff --git a/modules/gapi/src/backends/fluid/gfluidbuffer.cpp b/modules/gapi/src/backends/fluid/gfluidbuffer.cpp index 2bdbbbecd66..6cbe2dc3692 100644 --- a/modules/gapi/src/backends/fluid/gfluidbuffer.cpp +++ b/modules/gapi/src/backends/fluid/gfluidbuffer.cpp @@ -90,7 +90,7 @@ void fillBorderConstant(int borderSize, cv::Scalar borderValue, cv::Mat& mat) case CV_16S: return &fillConstBorderRow< int16_t>; break; case CV_16U: return &fillConstBorderRow; break; case CV_32F: return &fillConstBorderRow< float >; break; - default: GAPI_Error("InternalError"); return &fillConstBorderRow; + default: GAPI_Error("InternalError"); } }; diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp index 666271d7ba1..592bf43d54f 100644 --- a/modules/gapi/src/compiler/gcompiler.cpp +++ b/modules/gapi/src/compiler/gcompiler.cpp @@ -341,7 +341,6 @@ void cv::gimpl::GCompiler::validateInputMeta() default: GAPI_Error("InternalError"); } - return false; // should never happen }; GAPI_LOG_DEBUG(nullptr, "Total count: " << m_metas.size()); diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp index 6a397faca63..67ad18dfa20 100644 --- a/modules/gapi/src/executor/gstreamingexecutor.cpp +++ b/modules/gapi/src/executor/gstreamingexecutor.cpp @@ -1830,7 +1830,6 @@ bool cv::gimpl::GStreamingExecutor::pull(cv::GRunArgsP &&outs) default: GAPI_Error("Unsupported cmd type 
in pull"); } - GAPI_Error("Unreachable code"); } bool cv::gimpl::GStreamingExecutor::pull(cv::GOptRunArgsP &&outs) diff --git a/modules/gapi/src/pysrc/python_stream_source.cpp b/modules/gapi/src/pysrc/python_stream_source.cpp new file mode 100644 index 00000000000..6ede2ae2013 --- /dev/null +++ b/modules/gapi/src/pysrc/python_stream_source.cpp @@ -0,0 +1,17 @@ +#include +#include +#include +#include + +namespace cv { +namespace gapi { +namespace wip { + +cv::Ptr make_py_src(const cv::Ptr& src) +{ + return src; +} + +} // namespace wip +} // namespace gapi +} // namespace cv diff --git a/modules/gapi/test/infer/gapi_infer_ie_test.cpp b/modules/gapi/test/infer/gapi_infer_ie_test.cpp index ff274894344..26241c11858 100644 --- a/modules/gapi/test/infer/gapi_infer_ie_test.cpp +++ b/modules/gapi/test/infer/gapi_infer_ie_test.cpp @@ -2343,6 +2343,9 @@ TEST_F(LimitedSourceInfer, ReleaseFrame) TEST_F(LimitedSourceInfer, ReleaseFrameAsync) { + if (cvtest::skipUnstableTests) + throw SkipTestException("Skip LimitedSourceInfer.ReleaseFrameAsync as it hangs sporadically"); + constexpr int max_frames = 50; constexpr int resources_limit = 4; constexpr int nireq = 8; diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp index 10f8df820fc..343790cc887 100644 --- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp +++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp @@ -327,7 +327,6 @@ class InvalidSource : public cv::gapi::wip::IStreamSource { if (m_curr_frame_id % m_throw_every_nth_frame == 0) { throw std::logic_error(InvalidSource::exception_msg()); - return true; } else { d = cv::Mat(m_mat); } diff --git a/modules/ml/src/knearest.cpp b/modules/ml/src/knearest.cpp index 3d8f9b5d2ed..bd9137b24bd 100644 --- a/modules/ml/src/knearest.cpp +++ b/modules/ml/src/knearest.cpp @@ -396,8 +396,21 @@ class KDTreeImpl CV_FINAL : public Impl { Mat _res, _nr, _d; tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray()); - res.push_back(_res.t()); - _results.assign(res); + if( _results.needed() ) + { + res.push_back(_res.t()); + _results.assign(res); + } + if( _neighborResponses.needed() ) + { + nr.push_back(_nr.t()); + _neighborResponses.assign(nr); + } + if( _dists.needed() ) + { + d.push_back(_d.t()); + _dists.assign(d); + } } return result; // currently always 0 diff --git a/modules/ml/test/test_knearest.cpp b/modules/ml/test/test_knearest.cpp index 80baed96266..56d3e942698 100644 --- a/modules/ml/test/test_knearest.cpp +++ b/modules/ml/test/test_knearest.cpp @@ -39,11 +39,16 @@ TEST(ML_KNearest, accuracy) { SCOPED_TRACE("KDTree"); Mat neighborIndexes; + Mat neighborResponses; + Mat dists; float err = 1000; Ptr knn = KNearest::create(); knn->setAlgorithmType(KNearest::KDTREE); knn->train(trainData, ml::ROW_SAMPLE, trainLabels); - knn->findNearest(testData, 4, neighborIndexes); + knn->findNearest(testData, 4, neighborIndexes, neighborResponses, dists); + EXPECT_EQ(neighborIndexes.size(), Size(4, pointsCount)); + EXPECT_EQ(neighborResponses.size(), Size(4, pointsCount * 2)); + EXPECT_EQ(dists.size(), Size(4, pointsCount)); Mat bestLabels; // The output of the KDTree are the neighbor indexes, not actual class labels // so we need to do some extra work to get actual predictions diff --git a/modules/ximgproc/src/adaptive_manifold_filter_n.cpp b/modules/ximgproc/src/adaptive_manifold_filter_n.cpp index 3aa58cafe73..14d49adbf9c 100644 --- a/modules/ximgproc/src/adaptive_manifold_filter_n.cpp +++ 
b/modules/ximgproc/src/adaptive_manifold_filter_n.cpp @@ -520,11 +520,29 @@ void AdaptiveManifoldFilterN::h_filter(const Mat1f& src, Mat& dst, float sigma) float* dst_row = dst.ptr(y); dst_row[0] = src_row[0]; - for (int x = 1; x < src.cols; ++x) + int x = 1; + #if CV_ENABLE_UNROLLED && defined(_M_ARM64) + for ( ; x + 1 < src.cols; x += 2 ) { dst_row[x] = src_row[x] + a * (dst_row[x - 1] - src_row[x]); + dst_row[x + 1] = src_row[x + 1] + a * (dst_row[x] - src_row[x + 1]); } - for (int x = src.cols - 2; x >= 0; --x) + #endif + for ( ; x < src.cols; ++x ) + { + dst_row[x] = src_row[x] + a * (dst_row[x - 1] - src_row[x]); + } + + x = src.cols - 2; + + #if CV_ENABLE_UNROLLED && defined(_M_ARM64) + for ( ; x - 1 >= 0; x -= 2 ) + { + dst_row[x] = dst_row[x] + a * (dst_row[x + 1] - dst_row[x]); + dst_row[x - 1] = dst_row[x - 1] + a * (dst_row[x] - dst_row[x - 1]); + } + #endif + for ( ; x >= 0; --x ) { dst_row[x] = dst_row[x] + a * (dst_row[x + 1] - dst_row[x]); } diff --git a/modules/ximgproc/src/disparity_filters.cpp b/modules/ximgproc/src/disparity_filters.cpp index 03a2691df4c..505fde6b8d1 100644 --- a/modules/ximgproc/src/disparity_filters.cpp +++ b/modules/ximgproc/src/disparity_filters.cpp @@ -147,7 +147,6 @@ void DisparityWLSFilterImpl::init(double _lambda, double _sigma_color, bool _use min_disp = _min_disp; valid_disp_ROI = Rect(); right_view_valid_disp_ROI = Rect(); - min_disp=0; lambda = _lambda; sigma_color = _sigma_color; use_confidence = _use_confidence;
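// For reference, a scalar (non-unrolled) sketch of the recursive row filter that the
// adaptive-manifold h_filter change above restructures. It is shown only to make clear
// that the 2x-unrolled ARM64 path evaluates the same recurrence, since each dst value is
// written before the following iteration reads it. Names and the small driver below are
// illustrative assumptions, not library code.
#include <cstdio>
#include <vector>

static void hFilterRowReference(const float* src, float* dst, int cols, float a)
{
    dst[0] = src[0];
    for (int x = 1; x < cols; ++x)        // left-to-right pass
        dst[x] = src[x] + a * (dst[x - 1] - src[x]);
    for (int x = cols - 2; x >= 0; --x)   // right-to-left pass
        dst[x] = dst[x] + a * (dst[x + 1] - dst[x]);
}

int main()
{
    std::vector<float> src = {1.f, 0.f, 0.f, 4.f, 0.f};
    std::vector<float> dst(src.size());
    hFilterRowReference(src.data(), dst.data(), (int)src.size(), 0.5f);
    for (float v : dst) std::printf("%g ", v);
    std::printf("\n");
    return 0;
}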