From 39d3c9f968bfd2fbc705504bc1439768347852c9 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 15:10:06 +0200 Subject: [PATCH 01/36] minor edits --- src/+cv/getOptimalNewCameraMatrix.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/+cv/getOptimalNewCameraMatrix.cpp b/src/+cv/getOptimalNewCameraMatrix.cpp index 978f73bff..4488f7594 100644 --- a/src/+cv/getOptimalNewCameraMatrix.cpp +++ b/src/+cv/getOptimalNewCameraMatrix.cpp @@ -40,10 +40,11 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } + // Process Mat cameraMatrix(rhs[0].toMat(rhs[0].isSingle() ? CV_32F : CV_64F)), distCoeffs(rhs[1].toMat(rhs[1].isSingle() ? CV_32F : CV_64F)); - Size imageSize(rhs[2].toSize());; + Size imageSize(rhs[2].toSize()); Rect validPixROI; Mat A = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha, newImageSize, (nlhs>1 ? &validPixROI : NULL), From 57c88182bb8c15357c9a424c401f703d48c0d693 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 15:12:59 +0200 Subject: [PATCH 02/36] doc edits --- +cv/Estimator.m | 2 +- +cv/HoughCircles.m | 7 ++-- +cv/LineSegmentDetector.m | 2 +- +cv/RQDecomp3x3.m | 2 +- +cv/RotatedRect.m | 3 +- +cv/Stitcher.m | 4 +-- +cv/accumulate.m | 4 +-- +cv/accumulateSquare.m | 2 +- +cv/adaptiveThreshold.m | 3 ++ +cv/decomposeProjectionMatrix.m | 2 +- +cv/drawContours.m | 9 ++++++ +cv/estimateAffine2D.m | 2 +- +cv/estimateAffinePartial2D.m | 2 +- +cv/findEssentialMat.m | 2 +- +cv/floodFill.m | 9 +++--- +cv/getOptimalNewCameraMatrix.m | 2 +- +cv/getRectSubPix.m | 11 ++++--- +cv/recoverPose.m | 3 +- +cv/remap.m | 2 +- +cv/rotatedRectangleIntersection.m | 2 +- +cv/solveP3P.m | 12 +++++++ +cv/solvePnP.m | 48 +++++++++++++++++++++++----- +cv/stereoRectifyUncalibrated.m | 2 +- opencv_contrib/+cv/GradientDeriche.m | 4 +-- 24 files changed, 101 insertions(+), 40 deletions(-) diff --git a/+cv/Estimator.m b/+cv/Estimator.m index bdeb86136..5bcd337fa 100644 --- a/+cv/Estimator.m +++ b/+cv/Estimator.m @@ -28,7 +28,7 @@ % * __HomographyBasedEstimator__ Homography based rotation % estimator. % * __AffineBasedEstimator__ Affine transformation based - % estimator. This estimator uses pairwise tranformations + % estimator. This estimator uses pairwise transformations % estimated by matcher to estimate final transformation for % each camera. % diff --git a/+cv/HoughCircles.m b/+cv/HoughCircles.m index 5e32c2241..6fce37cfd 100644 --- a/+cv/HoughCircles.m +++ b/+cv/HoughCircles.m @@ -51,9 +51,10 @@ % ### Note % Usually the function detects the centers of circles well. However, it may % fail to find correct radii. You can assist to the function by specifying the -% radius range (`MinRadius` and `MaxRadius`) if you know it. Or, you may -% ignore the returned radius, use only the center, and find the correct radius -% using an additional procedure. +% radius range (`MinRadius` and `MaxRadius`) if you know it. Or, you may set +% `MaxRadius` to 0 to return centers only without radius search, and find the +% correct radius using an additional procedure. +% % % ## References % [Yuen90]: diff --git a/+cv/LineSegmentDetector.m b/+cv/LineSegmentDetector.m index d850a3c41..2944e47cf 100644 --- a/+cv/LineSegmentDetector.m +++ b/+cv/LineSegmentDetector.m @@ -40,7 +40,7 @@ % norm. default 2.0 % * __AngleTol__ Gradient angle tolerance in degrees. default 22.5 % * __DetectionThreshold__ Detection threshold: - % `-log10(NFA) > DetectionThreshold`. 
Used only when advancent + % `-log10(NFA) > DetectionThreshold`. Used only when advanced % refinement is chosen. default 0 % * __MinDensity__ Minimal density of aligned region points in the % enclosing rectangle. default 0.7 diff --git a/+cv/RQDecomp3x3.m b/+cv/RQDecomp3x3.m index e8585a85b..83adca0c7 100644 --- a/+cv/RQDecomp3x3.m +++ b/+cv/RQDecomp3x3.m @@ -25,7 +25,7 @@ % there is always more than one sequence of rotations about the three % principal axes that results in the same orientation of an object, eg. see % [Slabaugh]. Returned tree rotation matrices and corresponding three Euler -% angules are only one of the possible solutions. +% angles are only one of the possible solutions. % % ## References % [Slabaugh]: diff --git a/+cv/RotatedRect.m b/+cv/RotatedRect.m index 5ce34e07f..b9808f5d6 100644 --- a/+cv/RotatedRect.m +++ b/+cv/RotatedRect.m @@ -19,7 +19,8 @@ % ## Input % * __pt1__, __pt2__, __pt3__ Any 3 end points `[x,y]` of the % rotated rectangle. They must be given in order (either - % clockwise or anticlockwise). + % clockwise or anticlockwise). By definition, the two sides + % formed by these three points must be perpendicular. % % ## Output % * __rrect__ output rotated rectangle. A structure with the diff --git a/+cv/Stitcher.m b/+cv/Stitcher.m index 596421aac..2b49fe80a 100644 --- a/+cv/Stitcher.m +++ b/+cv/Stitcher.m @@ -444,7 +444,7 @@ function setEstimator(this, estimatorType, varargin) % * __HomographyBasedEstimator__ Homography based rotation % estimator. % * __AffineBasedEstimator__ Affine transformation based - % estimator. This estimator uses pairwise tranformations + % estimator. This estimator uses pairwise transformations % estimated by matcher to estimate final transformation for % each camera. % @@ -515,7 +515,7 @@ function setBundleAdjuster(this, adjusterType, varargin) % % The class uses `BundleAdjusterRay` by default. % - % See also: cv.Stitcher.getBundleAdjuster, cv.cv.BundleAdjuster + % See also: cv.Stitcher.getBundleAdjuster, cv.BundleAdjuster % Stitcher_(this.id, 'setBundleAdjuster', adjusterType, varargin{:}); end diff --git a/+cv/accumulate.m b/+cv/accumulate.m index bb383aa2e..449b7d6e3 100644 --- a/+cv/accumulate.m +++ b/+cv/accumulate.m @@ -1,4 +1,4 @@ -%ACCUMULATE Adds an image to the accumulator +%ACCUMULATE Adds an image to the accumulator image % % dst = cv.accumulate(src, dst) % dst = cv.accumulate(src, dst, 'OptionName',optionValue, ...) @@ -22,7 +22,7 @@ % The function supports multi-channel images. Each channel is processed % independently. % -% The functions accumulate* can be used, for example, to collect statistics +% The function cv.accumulate can be used, for example, to collect statistics % of a scene background viewed by a still camera and for the further % foreground-background segmentation. % diff --git a/+cv/accumulateSquare.m b/+cv/accumulateSquare.m index 6ded85633..68c8d23f9 100644 --- a/+cv/accumulateSquare.m +++ b/+cv/accumulateSquare.m @@ -1,4 +1,4 @@ -%ACCUMULATESQUARE Adds the square of a source image to the accumulator +%ACCUMULATESQUARE Adds the square of a source image to the accumulator image % % dst = cv.accumulateSquare(src, dst) % dst = cv.accumulateSquare(src, dst, 'OptionName',optionValue, ...) diff --git a/+cv/adaptiveThreshold.m b/+cv/adaptiveThreshold.m index da3c329f9..c5ca476b4 100644 --- a/+cv/adaptiveThreshold.m +++ b/+cv/adaptiveThreshold.m @@ -45,5 +45,8 @@ % where `T(x,y)` is a threshold calculated individually for each pixel (see % `Method` parameter). 
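For the 'Mean' variant, `T(x,y)` is the average of a `BlockSize`-by-`BlockSize` neighborhood of `(x,y)` minus a constant `C`; a minimal MATLAB sketch of that computation (plain arithmetic on a grayscale image `img`, not the cv.adaptiveThreshold call itself, and approximating the replicated border with zero padding):

    blockSize = 11;  C = 2;                                       % example parameters
    T  = conv2(double(img), ones(blockSize)/blockSize^2, 'same') - C;   % local mean minus C
    bw = uint8(double(img) > T) * 255;                            % binary thresholding against T
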
% +% Note: Internally the options `'BorderType','Replicate', 'Isolated',true` are +% used to process boundaries (see cv.copyMakeBorder). +% % See also: cv.threshold, cv.blur, cv.GaussianBlur, adaptthresh, imbinarize % diff --git a/+cv/decomposeProjectionMatrix.m b/+cv/decomposeProjectionMatrix.m index 4ba723844..1580baa16 100644 --- a/+cv/decomposeProjectionMatrix.m +++ b/+cv/decomposeProjectionMatrix.m @@ -24,7 +24,7 @@ % three Euler angles that could be used in OpenGL. Note, there is always more % than one sequence of rotations about the three principal axes that results % in the same orientation of an object, eg. see [Slabaugh]. Returned three -% rotation matrices and corresponding three Euler angules are only one of the +% rotation matrices and corresponding three Euler angles are only one of the % possible solutions. % % The function is based on cv.RQDecomp3x3 diff --git a/+cv/drawContours.m b/+cv/drawContours.m index 4e90d143c..ff1c2357f 100644 --- a/+cv/drawContours.m +++ b/+cv/drawContours.m @@ -67,5 +67,14 @@ % subplot(121), imshow(src), title('Source') % subplot(122), imshow(dst), title('Components') % +% ### Note +% When `Thickness='Filled'`, the function is designed to handle connected +% components with holes correctly even when no hierarchy date is provided. +% This is done by analyzing all the outlines together using even-odd rule. +% This may give incorrect results if you have a joint collection of separately +% retrieved contours. In order to solve this problem, you need to call +% cv.drawContours separately for each sub-group of contours, or iterate over +% the collection using `ContourIdx` parameter. +% % See also: cv.findContours, cv.fillPoly, visboundaries, bwlabel % diff --git a/+cv/estimateAffine2D.m b/+cv/estimateAffine2D.m index 0df0a4d82..55c4ef2d5 100644 --- a/+cv/estimateAffine2D.m +++ b/+cv/estimateAffine2D.m @@ -16,7 +16,7 @@ % which points are inliers. % % ## Options -% * __Method__ Robust method used to compute tranformation. RANSAC is the +% * __Method__ Robust method used to compute transformation. RANSAC is the % default method. The following methods are possible: % * __Ransac__ RANSAC-based robust method. % * __LMedS__ Least-Median robust method diff --git a/+cv/estimateAffinePartial2D.m b/+cv/estimateAffinePartial2D.m index e3e3e7ec8..2db2dd31b 100644 --- a/+cv/estimateAffinePartial2D.m +++ b/+cv/estimateAffinePartial2D.m @@ -16,7 +16,7 @@ % which points are inliers. % % ## Options -% * __Method__ Robust method used to compute tranformation. RANSAC is the +% * __Method__ Robust method used to compute transformation. RANSAC is the % default method. The following methods are possible: % * __Ransac__ RANSAC-based robust method. % * __LMedS__ Least-Median robust method diff --git a/+cv/findEssentialMat.m b/+cv/findEssentialMat.m index 071c3d8c6..a61e997dc 100644 --- a/+cv/findEssentialMat.m +++ b/+cv/findEssentialMat.m @@ -21,7 +21,7 @@ % * __CameraMatrix__ Camera matrix `K = [fx 0 cx; 0 fy cy; 0 0 1]`. Note that % this function assumes that `points1` and `points2` are feature points from % cameras with the same camera matrix. default `eye(3)`. -% * __Method__ Method for computing a essential matrix. One of: +% * __Method__ Method for computing an essential matrix. One of: % * __Ransac__ for the RANSAC algorithm. (default) % * __LMedS__ for the LMedS algorithm. % * __Confidence__ Parameter used for the RANSAC or LMedS methods only. 
It diff --git a/+cv/floodFill.m b/+cv/floodFill.m index 5c40e4f58..891dd9fdc 100644 --- a/+cv/floodFill.m +++ b/+cv/floodFill.m @@ -24,7 +24,10 @@ % the second variant of the function with the `Mask` option. On output, % pixels in the mask corresponding to filled pixels in the image are set % to 1 or to the value specified in `MaskFillValue` option as described -% below. +% below. Additionally, the function fills the border of the mask with ones +% to simplify internal processing. It is therefore possible to use the same +% mask in multiple calls to the function to make sure the filled areas do +% not overlap. % % ## Options % * __LoDiff__ Maximal lower brightness/color difference between the currently @@ -45,9 +48,7 @@ % * __Mask__ Operation mask that should be a single-channel 8-bit image, 2 % pixels wider and 2 pixels taller than image. Flood-filling cannot go % across non-zero pixels in the input mask. For example, an edge detector -% output can be used as a mask to stop filling at edges. It is possible to -% use the same mask in multiple calls to the function to make sure the -% filled areas do not overlap. Not set by default. +% output can be used as a mask to stop filling at edges. Not set by default. % * __MaskOnly__ If set, the function does not change the image in the output % (`newVal` is ignored), and only fills the output `mask` with the value % specified in `MaskFillValue` as described. This option only make sense in diff --git a/+cv/getOptimalNewCameraMatrix.m b/+cv/getOptimalNewCameraMatrix.m index 117217cc1..8bb1c6fc9 100644 --- a/+cv/getOptimalNewCameraMatrix.m +++ b/+cv/getOptimalNewCameraMatrix.m @@ -34,7 +34,7 @@ % free scaling parameter. By varying this parameter, you may retrieve only % sensible pixels `Alpha=0`, keep all the original image pixels if there is % valuable information in the corners `Alpha=1`, or get something in between. -% When `Alpha>0`, the undistortion result is likely to have some black pixels +% When `Alpha>0`, the undistorted result is likely to have some black pixels % corresponding to "virtual" pixels outside of the captured distorted image. % The original camera matrix, distortion coefficients, the computed new camera % matrix, and `newImageSize` should be passed to cv.initUndistortRectifyMap to diff --git a/+cv/getRectSubPix.m b/+cv/getRectSubPix.m index 07883deb7..2c6f1da35 100644 --- a/+cv/getRectSubPix.m +++ b/+cv/getRectSubPix.m @@ -24,11 +24,12 @@ % dst(x,y) = src(x + center(1) - (size(dst,2)-1)*0.5, % y + center(2) - (size(dst,1)-1)*0.5) % -% where the values of the pixels at non-integer coordinates are retrieved using -% bilinear interpolation. Every channel of multi-channel images is processed -% independently. While the center of the rectangle must be inside the image, -% parts of the rectangle may be outside. In this case, the replication border -% mode is used to extrapolate the pixel values outside of the image. +% where the values of the pixels at non-integer coordinates are retrieved +% using bilinear interpolation. Every channel of multi-channel images is +% processed independently. Also the image should be a single channel or three +% channel image. While the center of the rectangle must be inside the image, +% parts of the rectangle may be outside. In this case, the pixel values +% outside of the image are extrapolated. 
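A minimal usage sketch of the sub-pixel crop described above, assuming the mexopencv argument order `dst = cv.getRectSubPix(src, patchSize, center)` mirrors the underlying OpenCV function:

    img   = imread(fullfile(mexopencv.root(),'test','blox.jpg'));
    % 64x64 patch centered at a non-integer location; pixel values are
    % retrieved with bilinear interpolation
    patch = cv.getRectSubPix(img, [64 64], [100.7 120.3]);
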
% % See also: cv.Rect.crop, cv.warpAffine, cv.warpPerspective % diff --git a/+cv/recoverPose.m b/+cv/recoverPose.m index 0c7283cb2..e35a6b2c0 100644 --- a/+cv/recoverPose.m +++ b/+cv/recoverPose.m @@ -19,7 +19,8 @@ % * __mask__ Output mask for inliers in `points1` and `points2`. In the output % mask only inliers which pass the cheirality check. Vector of length N, see % the `Mask` input option. -% * __triangulatedPoints__ 3D points which were reconstructed by triangulation. +% * __triangulatedPoints__ 3D points which were reconstructed by triangulation, +% see cv.triangulatePoints % % ## Options % * __CameraMatrix__ Camera matrix `K = [fx 0 cx; 0 fy cy; 0 0 1]`. Note that diff --git a/+cv/remap.m b/+cv/remap.m index ca0f46530..97873eff4 100644 --- a/+cv/remap.m +++ b/+cv/remap.m @@ -67,7 +67,7 @@ % pairs `(floor(x), floor(y))` and `map2` contains indices in a table of % interpolation coefficients. % -% Note: Due to current implementaion limitations, the size of an input and +% Note: Due to current implementation limitations, the size of an input and % output images should be less than 32767x32767. % % See also: cv.convertMaps, interp2, imwarp diff --git a/+cv/rotatedRectangleIntersection.m b/+cv/rotatedRectangleIntersection.m index 62098bdae..5669af85b 100644 --- a/+cv/rotatedRectangleIntersection.m +++ b/+cv/rotatedRectangleIntersection.m @@ -11,7 +11,7 @@ % * __rect2__ Second rectangle. Similar struct to first. % % ## Output -% * __intersectingRegion__ The output array of the verticies of the +% * __intersectingRegion__ The output array of the vertices of the % intersecting region. It returns at most 8 vertices. A cell array of 2D % points `{[x,y], ...}` % * __result__ types of intersection between rectangles. One of: diff --git a/+cv/solveP3P.m b/+cv/solveP3P.m index 95f140fcc..247157598 100644 --- a/+cv/solveP3P.m +++ b/+cv/solveP3P.m @@ -33,5 +33,17 @@ % corresponding image projections, as well as the camera matrix and the % distortion coefficients. % +% ## References +% [gao2003complete]: +% > X.S. Gao, X.R. Hou, J. Tang, H.F. Chang; "Complete Solution +% > Classification for the Perspective-Three-Point Problem", +% > IEEE Trans. on PAMI, vol. 25, No. 8, p. 930-943, August 2003. +% +% [Ke17]: +% > T. Ke, S. Roumeliotis; "An Efficient Algebraic Solution to the +% > Perspective-Three-Point Problem", IEEE Conference on Computer Vision and +% > Pattern Recognition (CVPR), 2017 +% > [PDF](https://arxiv.org/pdf/1701.08237.pdf) +% % See also: cv.solvePnP % diff --git a/+cv/solvePnP.m b/+cv/solvePnP.m index 36d1779fc..c9421d880 100644 --- a/+cv/solvePnP.m +++ b/+cv/solvePnP.m @@ -51,16 +51,48 @@ % % The function estimates the object pose given a set of object points, % their corresponding image projections, as well as the camera matrix and -% the distortion coefficients. +% the distortion coefficients. See the figure below (more precisely, the +% X-axis of the camera frame is pointing to the right, the Y-axis downward and +% the Z-axis forward): % -% Note: The methods `DLS` and `UPnP` cannot be used as the current -% implementations are unstable and sometimes give completly wrong results. If -% you pass one of these two flags, `EPnP` method will be used instead. +% ![image](https://docs.opencv.org/3.4.0/pnp.jpg) % -% Note: The minimum number of points is 4. 
In the case of `P3P` and `AP3P` -% methods, it is required to use exactly 4 points (the first 3 points are used -% to estimate all the solutions of the P3P problem, the last one is used to -% retain the best solution that minimizes the reprojection error). +% Points expressed in the world frame `X_w` are projected into the image plane +% `[u,v]` using the perspective projection model `Pi` and the camera intrinsic +% parameters matrix `A`: +% +% [u; v; 1] = A * Pi * M_w^c * [X_w; Y_w; Z_w; 1] +% +% [u; v; 1] = [fx 0 cx; 0 fy cy; 0 0 1] * +% [1 0 0 0; 0 1 0 0; 0 0 1 0] * +% [r11 r12 r13 tx; r21 r22 r23 ty; r31 r32 r33 tz] * +% [X_w; Y_w; Z_w; 1] +% +% The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) +% vectors that allow to transform a 3D point expressed in the world frame into +% the camera frame: +% +% [X_c; Y_c; Z_c; 1] = M_w^c * [X_w; Y_w; Z_w; 1] +% +% [X_c; Y_c; Z_c; 1] = [r11 r12 r13 tx; r21 r22 r23 ty; r31 r32 r33 tz] * +% [X_w; Y_w; Z_w; 1] +% +% ### Notes +% +% * The methods `DLS` and `UPnP` cannot be used as the current implementations +% are unstable and sometimes give completely wrong results. If you pass one +% of these two flags, `EPnP` method will be used instead. +% +% * The minimum number of points is 4 in the general case. In the case of +% `P3P` and `AP3P` methods, it is required to use exactly 4 points (the +% first 3 points are used to estimate all the solutions of the P3P problem, +% the last one is used to retain the best solution that minimizes the +% reprojection error). +% +% * With `Iterative` method and `UseExtrinsicGuess=true`, the minimum number +% of points is 3 (3 points are sufficient to compute a pose but there are up +% to 4 solutions). The initial solution should be close to the global +% solution to converge. % % ## References % [gao2003complete]: diff --git a/+cv/stereoRectifyUncalibrated.m b/+cv/stereoRectifyUncalibrated.m index 33eb0f20b..06bae5e71 100644 --- a/+cv/stereoRectifyUncalibrated.m +++ b/+cv/stereoRectifyUncalibrated.m @@ -23,7 +23,7 @@ % parameter is greater than zero, all the point pairs that do not comply % with the epipolar geometry (that is, the points for which % `|points2{i}' * F * points1{i}| > Threshold`) are rejected prior to -% computing the homographies. Otherwise,all the points are considered +% computing the homographies. Otherwise, all the points are considered % inliers. default 5 % % The function computes the rectification transformations without knowing diff --git a/opencv_contrib/+cv/GradientDeriche.m b/opencv_contrib/+cv/GradientDeriche.m index 297ad130c..ed27e18d3 100644 --- a/opencv_contrib/+cv/GradientDeriche.m +++ b/opencv_contrib/+cv/GradientDeriche.m @@ -16,10 +16,10 @@ % * __AlphaDerive__ double see paper. default 1.0 % * __AlphaMean__ double see paper. default 1.0 % -% For more details about this implementation, please see [deriche1987using]. +% For more details about this implementation, please see [deriche1987]. % % ## References -% [deriche1987using]: +% [deriche1987]: % > Rachid Deriche. "Using Canny's criteria to derive a recursively % > implemented optimal edge detector". International Journal of Computer % > Vision, Volume 1 Issue 2, pages 167-187, 1987. 
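The projection model spelled out in the expanded cv.solvePnP help above can be exercised numerically; a small MATLAB sketch of the forward projection with made-up intrinsics and pose (distortion is ignored here):

    A  = [800 0 320; 0 800 240; 0 0 1];    % camera matrix [fx 0 cx; 0 fy cy; 0 0 1]
    R  = cv.Rodrigues([0.1; -0.2; 0.05]);  % rotation matrix from a rotation vector
    t  = [0.1; 0; 2];                      % translation vector
    Xw = [0.5; -0.3; 4];                   % 3D point in the world frame
    Xc = R * Xw + t;                       % world frame -> camera frame
    uv = A * Xc;  uv = uv(1:2) / uv(3);    % image point [u; v]
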
From 615ccdba3e50025fa217a55e3016ff4199e72d6c Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 15:21:07 +0200 Subject: [PATCH 03/36] samples edits --- .../samples/BackgroundSubtractorDemo.m | 25 +++--- samples/corner_subpixels_demo_gui.m | 4 +- samples/face_eyes_detect_demo.m | 89 +++++++++---------- .../gausian_median_blur_bilateral_filter.m | 2 +- samples/generalContours_demo1.m | 4 +- samples/generalContours_demo2.m | 4 +- samples/hull_demo_gui.m | 6 +- samples/moments_demo_gui.m | 14 ++- samples/pca_intro_demo.m | 16 ++-- samples/pointPolygonTest_demo.m | 6 +- 10 files changed, 84 insertions(+), 86 deletions(-) diff --git a/opencv_contrib/samples/BackgroundSubtractorDemo.m b/opencv_contrib/samples/BackgroundSubtractorDemo.m index adc0e40d8..ea5d96bc1 100644 --- a/opencv_contrib/samples/BackgroundSubtractorDemo.m +++ b/opencv_contrib/samples/BackgroundSubtractorDemo.m @@ -38,7 +38,7 @@ % static part of the scene or, more in general, everything that can be % considered as background given the characteristics of the observed scene. % -% <> +% <> % % Background modeling consists of two main steps: % @@ -52,24 +52,23 @@ %% Basics % -% Background subtraction is a major preprocessing steps in many vision based -% applications. For example, consider the cases like visitor counter where a +% Background subtraction is a major preprocessing step in many vision based +% applications. For example, consider the case of a visitor counter where a % static camera takes the number of visitors entering or leaving the room, or % a traffic camera extracting information about the vehicles etc. In all these % cases, first you need to extract the person or vehicles alone. Technically, % you need to extract the moving foreground from static background. % -% If you have an image of background alone, like image of the room without +% If you have an image of background alone, like an image of the room without % visitors, image of the road without vehicles etc, it is an easy job. Just % subtract the new image from the background. You get the foreground objects % alone. But in most of the cases, you may not have such an image, so we need % to extract the background from whatever images we have. It become more -% complicated when there is shadow of the vehicles. Since shadow is also -% moving, simple subtraction will mark that also as foreground. It complicates -% things. +% complicated when there are shadows of the vehicles. Since shadow also move, +% simple subtraction will mark that also as foreground. It complicates things. % % Several algorithms were introduced for this purpose. OpenCV has implemented -% four such algorithms which is very easy to use. We will see them one-by-one. +% four such algorithms which are very easy to use. We will see them one-by-one. % %% BackgroundSubtractorMOG % @@ -106,17 +105,17 @@ % better adaptibility to varying scenes due illumination changes etc. % % As in previous case, we have to create a background subtractor object. Here, -% you have an option of selecting whether shadow to be detected or not. If -% |DetectShadows| is true (which is so by default), it detects and marks -% shadows, but decreases the speed. Shadows will be marked in gray color. +% you have an option of detecting shadows or not. If |DetectShadows| is true +% (which is so by default), it detects and marks shadows, but decreases the +% speed. Shadows will be marked in gray color. 
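A hedged usage sketch of the shadow option discussed above, assuming the cv.BackgroundSubtractorMOG2 class accepts 'DetectShadows' in its constructor and exposes an apply method (the video path is a placeholder):

    bs  = cv.BackgroundSubtractorMOG2('DetectShadows',true);
    cap = cv.VideoCapture('some_video.avi');     % placeholder input file
    while true
        frame = cap.read();
        if isempty(frame), break; end
        fgmask = bs.apply(frame);                % foreground white, shadows gray
    end
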
% %% BackgroundSubtractorGMG % % This algorithm combines statistical background image estimation and % per-pixel Bayesian segmentation. It was introduced by: % -% * Andrew B. Godbehere, Akihiro Matsukawa, Ken Goldberg, "Visual Tracking of -% Human Visitors under Variable-Lighting Conditions for a Responsive Audi +% * Andrew B. Godbehere, Akihiro Matsukawa, and Ken Goldberg, "Visual Tracking +% of Human Visitors under Variable-Lighting Conditions for a Responsive Audi % Art Installation" in 2012. % % As per the paper, the system ran a successful interactive audio art diff --git a/samples/corner_subpixels_demo_gui.m b/samples/corner_subpixels_demo_gui.m index 2c98a25d7..f4586533c 100644 --- a/samples/corner_subpixels_demo_gui.m +++ b/samples/corner_subpixels_demo_gui.m @@ -20,7 +20,9 @@ function varargout = corner_subpixels_demo_gui(im) % load source image if nargin < 1 - src = imread(fullfile(mexopencv.root(),'test','blox.jpg')); + %im = fullfile(mexopencv.root(),'test','pic3.png'); + im = fullfile(mexopencv.root(),'test','blox.jpg'); + src = imread(im); elseif ischar(im) src = imread(im); else diff --git a/samples/face_eyes_detect_demo.m b/samples/face_eyes_detect_demo.m index 3d52a2a7d..fb55c1324 100644 --- a/samples/face_eyes_detect_demo.m +++ b/samples/face_eyes_detect_demo.m @@ -33,77 +33,76 @@ % Here we will work with face detection. Initially, the algorithm needs a lot % of positive images (images of faces) and negative images (images without % faces) to train the classifier. Then we need to extract features from it. -% For this, haar features shown in below image are used. They are just like +% For this, haar features shown in the below image are used. They are just like % our convolutional kernel. Each feature is a single value obtained by -% subtracting sum of pixels under white rectangle from sum of pixels under -% black rectangle. +% subtracting sum of pixels under the white rectangle from sum of pixels under +% the black rectangle. % -% <> +% <> % -% Now all possible sizes and locations of each kernel is used to calculate -% plenty of features. (Just imagine how much computation it needs? Even a +% Now all possible sizes and locations of each kernel are used to calculate +% lots of features. (Just imagine how much computation it needs? Even a % 24x24 window results over 160000 features). For each feature calculation, we -% need to find sum of pixels under white and black rectangles. To solve this, -% they introduced the integral images. It simplifies calculation of sum of -% pixels, how large may be the number of pixels, to an operation involving -% just four pixels. Nice, isn't it? It makes things super-fast. +% need to find the sum of the pixels under white and black rectangles. To +% solve this, they introduced the integral image. However large your image, it +% reduces the calculations for a given pixels, to an operation involving just +% four pixels. Nice, isn't it? It makes things super-fast. % % But among all these features we calculated, most of them are irrelevant. For -% example, consider the image below. Top row shows two good features. The +% example, consider the image below. The top row shows two good features. The % first feature selected seems to focus on the property that the region of the % eyes is often darker than the region of the nose and cheeks. The second % feature selected relies on the property that the eyes are darker than the -% bridge of the nose. But the same windows applying on cheeks or any other +% bridge of the nose. 
But the same windows applied to cheeks or any other % place is irrelevant. So how do we select the best features out of 160000+ % features? It is achieved by *Adaboost*. % -% <> +% <> % % For this, we apply each and every feature on all the training images. For % each feature, it finds the best threshold which will classify the faces to -% positive and negative. But obviously, there will be errors or -% misclassifications. We select the features with minimum error rate, which -% means they are the features that best classifies the face and non-face -% images. (The process is not as simple as this. Each image is given an equal -% weight in the beginning. After each classification, weights of misclassified -% images are increased. Then again same process is done. New error rates are -% calculated. Also new weights. The process is continued until required -% accuracy or error rate is achieved or required number of features are -% found). +% positive and negative. Obviously, there will be errors or misclassifications. +% We select the features with minimum error rate, which means they are the +% features that most accurately classify the face and non-face images. (The +% process is not as simple as this. Each image is given an equal weight in the +% beginning. After each classification, weights of misclassified images are +% increased. Then the same process is done. New error rates are calculated. +% Also new weights. The process is continued until the required accuracy or +% error rate is achieved or the required number of features are found). % -% Final classifier is a weighted sum of these weak classifiers. It is called -% weak because it alone can't classify the image, but together with others -% forms a strong classifier. The paper says even 200 features provide +% The final classifier is a weighted sum of these weak classifiers. It is +% called weak because it alone can't classify the image, but together with +% others forms a strong classifier. The paper says even 200 features provide % detection with 95% accuracy. Their final setup had around 6000 features. % (Imagine a reduction from 160000+ features to 6000 features. That is a big % gain). % % So now you take an image. Take each 24x24 window. Apply 6000 features to it. % Check if it is face or not. Wow.. Isn't it a little inefficient and time -% consuming? Yes, it is. Authors have a good solution for that. +% consuming? Yes, it is. The authors have a good solution for that. % -% In an image, most of the image region is non-face region. So it is a better +% In an image, most of the image is non-face region. So it is a better % idea to have a simple method to check if a window is not a face region. If -% it is not, discard it in a single shot. Don't process it again. Instead -% focus on region where there can be a face. This way, we can find more time -% to check a possible face region. +% it is not, discard it in a single shot, and don't process it again. Instead, +% focus on regions where there can be a face. This way, we spend more time +% checking a possible face region. % % For this they introduced the concept of *Cascade of Classifiers*. Instead of -% applying all the 6000 features on a window, group the features into -% different stages of classifiers and apply one-by-one. (Normally first few -% stages will contain very less number of features). If a window fails the -% first stage, discard it. We don't consider remaining features on it. If it -% passes, apply the second stage of features and continue the process. 
The -% window which passes all stages is a face region. How is the plan ! +% applying all 6000 features on a window, the features are grouped into +% different stages of classifiers and applied one-by-one. (Normally the first +% few stages will contain very many fewer features). If a window fails the +% first stage, discard it. We don't consider the remaining features on it. If +% it passes, apply the second stage of features and continue the process. The +% window which passes all stages is a face region. How is that plan! % -% Authors' detector had 6000+ features with 38 stages with 1, 10, 25, 25 and -% 50 features in first five stages. (Two features in the above image is -% actually obtained as the best two features from Adaboost). According to -% authors, on an average, 10 features out of 6000+ are evaluated per -% sub-window. +% The authors' detector had 6000+ features with 38 stages with 1, 10, 25, 25 +% and 50 features in the first five stages. (The two features in the above +% image are actually obtained as the best two features from Adaboost). +% According to the authors, on average, 10 features out of 6000+ are evaluated +% per sub-window. % % So this is a simple intuitive explanation of how Viola-Jones face detection -% works. Read paper for more details or check out the following references: +% works. Read the paper for more details or check out the following references: % % * Video Lecture on % @@ -116,8 +115,8 @@ % Training. % % Here we will deal with detection. OpenCV already contains many pre-trained -% classifiers for face, eyes, smile etc. Those XML files are stored in -% |opencv/data/haarcascades/| folder. +% classifiers for face, eyes, smiles, etc. Those XML files are stored in +% the |opencv/data/haarcascades/| folder. % %% Code @@ -267,7 +266,7 @@ function download_classifier_xml(fname) if exist(fname, 'file') ~= 2 % attempt to download trained Haar/LBP/HOG classifier from Github - url = 'https://cdn.rawgit.com/opencv/opencv/3.2.0/data/'; + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; [~, f, ext] = fileparts(fname); if strncmpi(f, 'haarcascade_', length('haarcascade_')) url = [url, 'haarcascades/']; diff --git a/samples/gausian_median_blur_bilateral_filter.m b/samples/gausian_median_blur_bilateral_filter.m index b3910ac54..0f64f63d5 100644 --- a/samples/gausian_median_blur_bilateral_filter.m +++ b/samples/gausian_median_blur_bilateral_filter.m @@ -83,7 +83,7 @@ % $$ G_{0}(x, y) = A e^{ \frac{ -(x - \mu_{x})^{2} }{ 2\sigma^{2}_{x} } + % \frac{ -(y - \mu_{y})^{2} }{ 2\sigma^{2}_{y} } } $$ % -% where $\mu$ is the mean (the peak) and $\sigma$ represents the variance +% where $\mu$ is the mean (the peak) and $\sigma^{2}$ represents the variance % (per each of the variables $x$ and $y$). 
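The kernel above can be built directly from that formula; a short MATLAB sketch constructing a normalized 2D Gaussian (zero mean) and applying it with plain conv2, which mirrors what cv.GaussianBlur does without calling it:

    sigma = 2;  half = ceil(3*sigma);
    [x, y] = meshgrid(-half:half, -half:half);    % mu_x = mu_y = 0
    G = exp(-(x.^2 + y.^2) / (2*sigma^2));
    G = G / sum(G(:));                            % choose A so the kernel sums to 1
    smoothed = conv2(double(img), G, 'same');     % 'img' is a grayscale image
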
% diff --git a/samples/generalContours_demo1.m b/samples/generalContours_demo1.m index 9c13bc3dc..c1f2c6b7c 100644 --- a/samples/generalContours_demo1.m +++ b/samples/generalContours_demo1.m @@ -8,8 +8,8 @@ % % Sources: % -% * -% * +% * +% * % %% Input diff --git a/samples/generalContours_demo2.m b/samples/generalContours_demo2.m index e26aa060e..f3ac4cf2a 100644 --- a/samples/generalContours_demo2.m +++ b/samples/generalContours_demo2.m @@ -9,8 +9,8 @@ % % Sources: % -% * -% * +% * +% * % %% Input diff --git a/samples/hull_demo_gui.m b/samples/hull_demo_gui.m index 4e792e8b6..fe88f078a 100644 --- a/samples/hull_demo_gui.m +++ b/samples/hull_demo_gui.m @@ -6,14 +6,14 @@ % % Sources: % -% * -% * +% * +% * % function varargout = hull_demo_gui(im) % load source image if nargin < 1 - src = imread(fullfile(mexopencv.root(),'test','HappyFish.jpg')); + src = cv.imread(fullfile(mexopencv.root(),'test','stuff.jpg')); elseif ischar(im) src = imread(im); else diff --git a/samples/moments_demo_gui.m b/samples/moments_demo_gui.m index b156f7df2..db7441ef5 100644 --- a/samples/moments_demo_gui.m +++ b/samples/moments_demo_gui.m @@ -9,8 +9,8 @@ % % Sources: % -% * -% * +% * +% * % function varargout = moments_demo_gui(im) @@ -45,8 +45,7 @@ function onChange(~,~,h) canny_output = cv.Canny(h.src, [thresh thresh*2], 'ApertureSize',3); % Find contours - [contours, hierarchy] = cv.findContours(canny_output, ... - 'Mode','Tree', 'Method','Simple'); + contours = cv.findContours(canny_output, 'Mode','Tree', 'Method','Simple'); % Get the moments and compute the mass center mu = cell(size(contours)); @@ -60,11 +59,10 @@ function onChange(~,~,h) drawing = zeros([size(canny_output) 3], 'uint8'); for i=1:numel(contours) clr = randi([0 255], [1 3], 'uint8'); - drawing = cv.drawContours(drawing, contours, ... - 'Hierarchy',hierarchy, 'ContourIdx',i-1, 'MaxLevel',0, ... - 'Color',clr, 'Thickness',2, 'LineType',8); + drawing = cv.drawContours(drawing, contours, 'ContourIdx',i-1, ... + 'Color',clr, 'Thickness',2); drawing = cv.circle(drawing, mc{i}, 4, ... - 'Color',clr, 'Thickness','Filled', 'LineType',8); + 'Color',clr, 'Thickness','Filled'); end % show result diff --git a/samples/pca_intro_demo.m b/samples/pca_intro_demo.m index 2ffc7b3a5..812042e77 100644 --- a/samples/pca_intro_demo.m +++ b/samples/pca_intro_demo.m @@ -5,8 +5,8 @@ % % Sources: % -% * -% * +% * +% * % %% Theory @@ -14,7 +14,7 @@ % Principal Component Analysis (PCA) is a statistical procedure that extracts % the most important features of a dataset. % -% <> +% <> % % Consider that you have a set of 2D points as it is shown in the figure % above. Each dimension corresponds to a feature you are interested in. Here @@ -38,7 +38,7 @@ % consist of 2 vectors called _eigenvectors_ which are the % _principal components_ of the data set. % -% <> +% <> % % The size of each eigenvector is encoded in the corresponding eigenvalue and % indicates how much the data vary along the principal component. 
The @@ -131,7 +131,7 @@ function pca_intro_demo() fname = fullfile(mexopencv.root(), 'test', 'pca_test1.jpg'); if exist(fname, 'file') ~= 2 disp('Downloading image...') - url = 'https://cdn.rawgit.com/opencv/opencv/3.2.0/samples/data/pca_test1.jpg'; + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/samples/data/pca_test1.jpg'; urlwrite(url, fname); end src = cv.imread(fname, 'Color',true); @@ -142,7 +142,7 @@ function pca_intro_demo() %% % Find all the contours in the thresholded image - [contours, hierarchy] = cv.findContours(bw, 'Mode','List', 'Method','None'); + contours = cv.findContours(bw, 'Mode','List', 'Method','None'); for i=1:numel(contours) % Calculate the area of each contour a = cv.contourArea(contours{i}); @@ -152,8 +152,8 @@ function pca_intro_demo() end % Draw each contour only for visualisation purposes - src = cv.drawContours(src, contours, 'Hierarchy',hierarchy, ... - 'ContourIdx',i-1, 'MaxLevel',0, 'Color',[255 0 0], 'Thickness',2); + src = cv.drawContours(src, contours, ... + 'ContourIdx',i-1, 'Color',[255 0 0], 'Thickness',2); % Find the orientation of each shape [src, ang] = getOrientation(src, contours{i}); diff --git a/samples/pointPolygonTest_demo.m b/samples/pointPolygonTest_demo.m index 4f2a8b480..4f0fda755 100644 --- a/samples/pointPolygonTest_demo.m +++ b/samples/pointPolygonTest_demo.m @@ -5,8 +5,8 @@ % % Sources: % -% * -% * +% * +% * % %% @@ -22,7 +22,7 @@ %% % get the contours -[contours, hierarchy] = cv.findContours(src, 'Mode','Tree', 'Method','Simple'); +contours = cv.findContours(src, 'Mode','Tree', 'Method','Simple'); contours = cellfun(@(C) cat(1,C{:}), contours, 'UniformOutput',false); assert(~isempty(contours)); From b68e36d2a60139da2f131a1a663678ce6b6856a6 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 15:28:19 +0200 Subject: [PATCH 04/36] add MxArray conversion function convert MxArray to vector of Size_ --- include/mexopencv.hpp | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/include/mexopencv.hpp b/include/mexopencv.hpp index bdc67f40e..c0d4a80b0 100644 --- a/include/mexopencv.hpp +++ b/include/mexopencv.hpp @@ -272,6 +272,48 @@ std::vector > MxArrayToVectorPoint3(const MxArray& arr) return vp; } +/** Convert an MxArray to std::vector> + * + * @param arr MxArray object. In one of the following forms: + * - a cell-array of sizes (2-element vectors) of length \c N, + * e.g: {[w,h], [w,h], ...} + * - a numeric matrix of size \c Nx2, \c Nx1x2, or \c 1xNx2 in the form: + * [w,h; w,h; ...] or cat(3, [w,h], [w,h], ...) + * @return vector of sizes of size \c N + * + * Example: + * @code + * MxArray cellArray(prhs[0]); + * vector vs = MxArrayToVectorSize(cellArray); + * @endcode + */ +template +std::vector > MxArrayToVectorSize(const MxArray& arr) +{ + std::vector > vs; + if (arr.isNumeric()) { + if (arr.numel() == 2) + vs.push_back(arr.toSize_()); + else + arr.toMat(cv::traits::Depth >::value).reshape(2, 0).copyTo(vs); + } + else if (arr.isCell()) { + /* + std::vector va(arr.toVector()); + vs.reserve(va.size()); + for (std::vector::const_iterator it = va.begin(); it != va.end(); ++it) + vs.push_back(it->toSize_()); + */ + vs = arr.toVector( + std::const_mem_fun_ref_t, MxArray>( + &MxArray::toSize_)); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unable to convert MxArray to std::vector>"); + return vs; +} + /** Convert an MxArray to std::vector> * * @param arr MxArray object. 
In one of the following forms: From 06a986e22b0be848989fbe023f53ca39bf040462 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 15:29:53 +0200 Subject: [PATCH 05/36] core: vectorize Rect and RotatedRect functions --- src/+cv/private/Rect_.cpp | 219 ++++++++++++++++++++++++++----- src/+cv/private/RotatedRect_.cpp | 77 ++++++++--- 2 files changed, 245 insertions(+), 51 deletions(-) diff --git a/src/+cv/private/Rect_.cpp b/src/+cv/private/Rect_.cpp index 43f480436..981c06396 100644 --- a/src/+cv/private/Rect_.cpp +++ b/src/+cv/private/Rect_.cpp @@ -27,69 +27,216 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) if (method == "from2points") { nargchk(nrhs==3 && nlhs<=1); - Point_ pt1(rhs[1].toPoint_()), - pt2(rhs[2].toPoint_()); - Rect_ rect(pt1, pt2); - plhs[0] = MxArray(rect); - + if (rhs[1].isNumeric() && rhs[1].numel() == 2 && + rhs[2].isNumeric() && rhs[2].numel() == 2) { + Point2d pt1(rhs[1].toPoint_()), + pt2(rhs[2].toPoint_()); + Rect2d rect(pt1, pt2); + plhs[0] = MxArray(rect); + } + else { + vector pts1(rhs[1].toVector()), + pts2(rhs[2].toVector()); + if (pts1.size() != pts2.size()) + mexErrMsgIdAndTxt("mexopencv:error", "Length mismatch"); + vector rects; + rects.reserve(pts1.size()); + for (size_t i = 0; i < pts1.size(); ++i) + rects.push_back(Rect2d(pts1[i], pts2[i])); + plhs[0] = (rhs[1].isCell() && rhs[2].isCell()) ? + MxArray(rects) : MxArray(Mat(rects, false).reshape(1, 0)); + } } else if (method == "tl") { nargchk(nrhs==2 && nlhs<=1); - Rect_ rect(rhs[1].toRect_()); - Point_ pt = rect.tl(); - plhs[0] = MxArray(pt); // 1x2 vector [x,y] + if (rhs[1].isNumeric() && rhs[1].numel() == 4) { + Rect2d rect(rhs[1].toRect_()); + Point2d pt = rect.tl(); + plhs[0] = MxArray(pt); // 1x2 vector [x,y] + } + else { + vector rects(MxArrayToVectorRect(rhs[1])); + vector pts; + pts.reserve(rects.size()); + for (size_t i = 0; i < rects.size(); ++i) + pts.push_back(rects[i].tl()); + plhs[0] = (rhs[1].isCell()) ? + MxArray(pts) : MxArray(Mat(pts, false).reshape(1, 0)); + } } else if (method == "br") { nargchk(nrhs==2 && nlhs<=1); - Rect_ rect(rhs[1].toRect_()); - Point_ pt = rect.br(); - plhs[0] = MxArray(pt); // 1x2 vector [x,y] + if (rhs[1].isNumeric() && rhs[1].numel() == 4) { + Rect2d rect(rhs[1].toRect_()); + Point2d pt = rect.br(); + plhs[0] = MxArray(pt); // 1x2 vector [x,y] + } + else { + vector rects(MxArrayToVectorRect(rhs[1])); + vector pts; + pts.reserve(rects.size()); + for (size_t i = 0; i < rects.size(); ++i) + pts.push_back(rects[i].br()); + plhs[0] = (rhs[1].isCell()) ? + MxArray(pts) : MxArray(Mat(pts, false).reshape(1, 0)); + } } else if (method == "size") { nargchk(nrhs==2 && nlhs<=1); - Rect_ rect(rhs[1].toRect_()); - Size_ sz = rect.size(); - plhs[0] = MxArray(sz); // 1x2 vector [w,h] + if (rhs[1].isNumeric() && rhs[1].numel() == 4) { + Rect2d rect(rhs[1].toRect_()); + Size2d sz = rect.size(); + plhs[0] = MxArray(sz); // 1x2 vector [w,h] + } + else { + vector rects(MxArrayToVectorRect(rhs[1])); + vector sz; + sz.reserve(rects.size()); + for (size_t i = 0; i < rects.size(); ++i) + sz.push_back(rects[i].size()); + plhs[0] = (rhs[1].isCell()) ? 
+ MxArray(sz) : MxArray(Mat(sz, false).reshape(1, 0)); + } } else if (method == "area") { nargchk(nrhs==2 && nlhs<=1); - Rect_ rect(rhs[1].toRect_()); - double a = rect.area(); - plhs[0] = MxArray(a); + if (rhs[1].isNumeric() && rhs[1].numel() == 4) { + Rect2d rect(rhs[1].toRect_()); + double a = rect.area(); + plhs[0] = MxArray(a); + } + else { + vector rects(MxArrayToVectorRect(rhs[1])); + vector va; + va.reserve(rects.size()); + for (size_t i = 0; i < rects.size(); ++i) + va.push_back(rects[i].area()); + plhs[0] = MxArray(va); + } } else if (method == "contains") { nargchk(nrhs==3 && nlhs<=1); - Rect_ rect(rhs[1].toRect_()); - Point_ pt(rhs[2].toPoint_()); - plhs[0] = MxArray(rect.contains(pt)); + Rect2d rect(rhs[1].toRect_()); + if (rhs[2].isNumeric() && rhs[2].numel() == 2) { + Point2d pt(rhs[2].toPoint_()); + plhs[0] = MxArray(rect.contains(pt)); + } + else { + vector pts(rhs[2].toVector()); + vector vb; + vb.reserve(pts.size()); + for (size_t i = 0; i < pts.size(); ++i) + vb.push_back(rect.contains(pts[i])); + plhs[0] = MxArray(vb); + } } else if (method == "adjustPosition") { nargchk(nrhs==3 && nlhs<=1); - Rect_ rect(rhs[1].toRect_()); - Point_ pt(rhs[2].toPoint_()); - rect += pt; - plhs[0] = MxArray(rect); + if (rhs[1].isNumeric() && rhs[1].numel() == 4 && + rhs[2].isNumeric() && rhs[2].numel() == 2) { + Rect2d rect(rhs[1].toRect_()); + Point2d pt(rhs[2].toPoint_()); + rect += pt; + plhs[0] = MxArray(rect); + } + else { + vector rects(MxArrayToVectorRect(rhs[1])); + if (rhs[2].isNumeric() && rhs[2].numel() == 2) { + Point2d pt(rhs[2].toPoint_()); + for (size_t i = 0; i < rects.size(); ++i) + rects[i] += pt; + } + else { + vector pts(rhs[2].toVector()); + if (rects.size() != pts.size()) + mexErrMsgIdAndTxt("mexopencv:error", "Length mismatch"); + for (size_t i = 0; i < rects.size(); ++i) + rects[i] += pts[i]; + } + plhs[0] = (rhs[1].isCell()) ? + MxArray(rects) : MxArray(Mat(rects, false).reshape(1, 0)); + } } else if (method == "adjustSize") { nargchk(nrhs==3 && nlhs<=1); - Rect_ rect(rhs[1].toRect_()); - Size_ sz(rhs[2].toSize_()); - rect += sz; - plhs[0] = MxArray(rect); + if (rhs[1].isNumeric() && rhs[1].numel() == 4 && + rhs[2].isNumeric() && rhs[2].numel() == 2) { + Rect2d rect(rhs[1].toRect_()); + Size2d sz(rhs[2].toSize_()); + rect += sz; + plhs[0] = MxArray(rect); + } + else { + vector rects(MxArrayToVectorRect(rhs[1])); + if (rhs[2].isNumeric() && rhs[2].numel() == 2) { + Size2d sz(rhs[2].toSize_()); + for (size_t i = 0; i < rects.size(); ++i) + rects[i] += sz; + } + else { + vector sz(MxArrayToVectorSize(rhs[2])); + if (rects.size() != sz.size()) + mexErrMsgIdAndTxt("mexopencv:error", "Length mismatch"); + for (size_t i = 0; i < rects.size(); ++i) + rects[i] += sz[i]; + } + plhs[0] = (rhs[1].isCell()) ? 
+ MxArray(rects) : MxArray(Mat(rects, false).reshape(1, 0)); + } } else if (method == "intersect") { nargchk(nrhs==3 && nlhs<=1); - Rect_ rect1(rhs[1].toRect_()), - rect2(rhs[2].toRect_()); - rect1 &= rect2; - plhs[0] = MxArray(rect1); + if (rhs[1].isNumeric() && rhs[1].numel() == 4 && + rhs[2].isNumeric() && rhs[2].numel() == 4) { + Rect2d rect1(rhs[1].toRect_()), + rect2(rhs[2].toRect_()); + rect1 &= rect2; + plhs[0] = MxArray(rect1); + } + else { + vector rects1(MxArrayToVectorRect(rhs[1])); + if (rhs[2].isNumeric() && rhs[2].numel() == 4) { + Rect2d rect2(rhs[2].toRect_()); + for (size_t i = 0; i < rects1.size(); ++i) + rects1[i] &= rect2; + } + else { + vector rects2(MxArrayToVectorRect(rhs[2])); + if (rects1.size() != rects2.size()) + mexErrMsgIdAndTxt("mexopencv:error", "Length mismatch"); + for (size_t i = 0; i < rects1.size(); ++i) + rects1[i] &= rects2[i]; + } + plhs[0] = (rhs[1].isCell()) ? + MxArray(rects1) : MxArray(Mat(rects1, false).reshape(1, 0)); + } } else if (method == "union") { nargchk(nrhs==3 && nlhs<=1); - Rect_ rect1(rhs[1].toRect_()), - rect2(rhs[2].toRect_()); - rect1 |= rect2; - plhs[0] = MxArray(rect1); + if (rhs[1].isNumeric() && rhs[1].numel() == 4 && + rhs[2].isNumeric() && rhs[2].numel() == 4) { + Rect2d rect1(rhs[1].toRect_()), + rect2(rhs[2].toRect_()); + rect1 |= rect2; + plhs[0] = MxArray(rect1); + } + else { + vector rects1(MxArrayToVectorRect(rhs[1])); + if (rhs[2].isNumeric() && rhs[2].numel() == 4) { + Rect2d rect2(rhs[2].toRect_()); + for (size_t i = 0; i < rects1.size(); ++i) + rects1[i] |= rect2; + } + else { + vector rects2(MxArrayToVectorRect(rhs[2])); + if (rects1.size() != rects2.size()) + mexErrMsgIdAndTxt("mexopencv:error", "Length mismatch"); + for (size_t i = 0; i < rects1.size(); ++i) + rects1[i] |= rects2[i]; + } + plhs[0] = (rhs[1].isCell()) ? 
+ MxArray(rects1) : MxArray(Mat(rects1, false).reshape(1, 0)); + } } else if (method == "crop") { nargchk((nrhs==3 || nrhs==4) && nlhs<=1); diff --git a/src/+cv/private/RotatedRect_.cpp b/src/+cv/private/RotatedRect_.cpp index 4f9ab3e76..f1f20027f 100644 --- a/src/+cv/private/RotatedRect_.cpp +++ b/src/+cv/private/RotatedRect_.cpp @@ -27,30 +27,77 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) if (method == "from3points") { nargchk(nrhs==4 && nlhs<=1); - Point2f pt1(rhs[1].toPoint2f()), - pt2(rhs[2].toPoint2f()), - pt3(rhs[3].toPoint2f()); - plhs[0] = MxArray(RotatedRect(pt1, pt2, pt3)); + if (rhs[1].isNumeric() && rhs[1].numel() == 2 && + rhs[2].isNumeric() && rhs[2].numel() == 2 && + rhs[3].isNumeric() && rhs[3].numel() == 2) { + Point2f pt1(rhs[1].toPoint2f()), + pt2(rhs[2].toPoint2f()), + pt3(rhs[3].toPoint2f()); + plhs[0] = MxArray(RotatedRect(pt1, pt2, pt3)); + } + else { + vector pts1(rhs[1].toVector()), + pts2(rhs[2].toVector()), + pts3(rhs[3].toVector()); + if (pts1.size() != pts2.size() || pts1.size() != pts3.size()) + mexErrMsgIdAndTxt("mexopencv:error", "Length mismatch"); + vector rrects; + rrects.reserve(pts1.size()); + for (size_t i = 0; i < pts1.size(); ++i) + rrects.push_back(RotatedRect(pts1[i], pts2[i], pts3[i])); + plhs[0] = MxArray(rrects); + } } else if (method == "points") { nargchk(nrhs==2 && nlhs<=1); - RotatedRect rect(rhs[1].toRotatedRect()); - Point2f pt[4]; - rect.points(pt); - vector v(pt, pt+4); - plhs[0] = MxArray(Mat(v).reshape(1,0)); // 4x2 matrix + Point2f pts[4]; + if (rhs[1].numel() == 1) { + RotatedRect rrect(rhs[1].toRotatedRect()); + rrect.points(pts); + plhs[0] = MxArray(Mat(4, 2, CV_32F, pts)); // 4x2 matrix + } + else { + vector rrects(rhs[1].toVector()); + vector vvp; + vvp.reserve(rrects.size()); + for (size_t i = 0; i < rrects.size(); ++i) { + rrects[i].points(pts); + vvp.push_back(Mat(4, 2, CV_32F, pts).clone()); + } + plhs[0] = MxArray(vvp); // cell array of 4x2 matrices + } } else if (method == "boundingRect") { nargchk(nrhs==2 && nlhs<=1); - RotatedRect rect(rhs[1].toRotatedRect()); - Rect r = rect.boundingRect(); - plhs[0] = MxArray(r); + if (rhs[1].numel() == 1) { + RotatedRect rrect(rhs[1].toRotatedRect()); + Rect r = rrect.boundingRect(); + plhs[0] = MxArray(r); + } + else { + vector rrects(rhs[1].toVector()); + vector vr; + vr.reserve(rrects.size()); + for (size_t i = 0; i < rrects.size(); ++i) + vr.push_back(rrects[i].boundingRect()); + plhs[0] = MxArray(Mat(vr, false).reshape(1, 0)); + } } else if (method == "boundingRect2f") { nargchk(nrhs==2 && nlhs<=1); - RotatedRect rect(rhs[1].toRotatedRect()); - Rect_ r = rect.boundingRect2f(); - plhs[0] = MxArray(r); + if (rhs[1].numel() == 1) { + RotatedRect rrect(rhs[1].toRotatedRect()); + Rect2f r = rrect.boundingRect2f(); + plhs[0] = MxArray(r); + } + else { + vector rrects(rhs[1].toVector()); + vector vr; + vr.reserve(rrects.size()); + for (size_t i = 0; i < rrects.size(); ++i) + vr.push_back(rrects[i].boundingRect2f()); + plhs[0] = MxArray(Mat(vr, false).reshape(1, 0)); + } } else mexErrMsgIdAndTxt("mexopencv:error", From 53c86b8f4164d3d76639e0967943f7d4437b80b3 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 15:44:17 +0200 Subject: [PATCH 06/36] calib3d: new fisheye functions (issue #378) --- +cv/fisheyeCalibrate.m | 56 ++++++++++ +cv/fisheyeDistortPoints.m | 23 ++++ ...timateNewCameraMatrixForUndistortRectify.m | 24 ++++ +cv/fisheyeInitUndistortRectifyMap.m | 29 +++++ +cv/fisheyeProjectPoints.m | 38 +++++++ +cv/fisheyeStereoCalibrate.m | 58 
++++++++++ +cv/fisheyeStereoRectify.m | 44 ++++++++ +cv/fisheyeUndistortImage.m | 45 ++++++++ +cv/fisheyeUndistortPoints.m | 21 ++++ src/+cv/fisheyeCalibrate.cpp | 81 ++++++++++++++ src/+cv/fisheyeDistortPoints.cpp | 47 ++++++++ ...mateNewCameraMatrixForUndistortRectify.cpp | 55 ++++++++++ src/+cv/fisheyeInitUndistortRectifyMap.cpp | 61 +++++++++++ src/+cv/fisheyeProjectPoints.cpp | 66 +++++++++++ src/+cv/fisheyeStereoCalibrate.cpp | 103 ++++++++++++++++++ src/+cv/fisheyeStereoRectify.cpp | 82 ++++++++++++++ src/+cv/fisheyeUndistortImage.cpp | 47 ++++++++ src/+cv/fisheyeUndistortPoints.cpp | 49 +++++++++ test/unit_tests/TestFisheyeCalibrate.m | 19 ++++ test/unit_tests/TestFisheyeDistortPoints.m | 19 ++++ ...timateNewCameraMatrixForUndistortRectify.m | 19 ++++ .../TestFisheyeInitUndistortRectifyMap.m | 19 ++++ test/unit_tests/TestFisheyeProjectPoints.m | 19 ++++ test/unit_tests/TestFisheyeStereoCalibrate.m | 19 ++++ test/unit_tests/TestFisheyeStereoRectify.m | 19 ++++ test/unit_tests/TestFisheyeUndistortImage.m | 19 ++++ test/unit_tests/TestFisheyeUndistortPoints.m | 19 ++++ 27 files changed, 1100 insertions(+) create mode 100644 +cv/fisheyeCalibrate.m create mode 100644 +cv/fisheyeDistortPoints.m create mode 100644 +cv/fisheyeEstimateNewCameraMatrixForUndistortRectify.m create mode 100644 +cv/fisheyeInitUndistortRectifyMap.m create mode 100644 +cv/fisheyeProjectPoints.m create mode 100644 +cv/fisheyeStereoCalibrate.m create mode 100644 +cv/fisheyeStereoRectify.m create mode 100644 +cv/fisheyeUndistortImage.m create mode 100644 +cv/fisheyeUndistortPoints.m create mode 100644 src/+cv/fisheyeCalibrate.cpp create mode 100644 src/+cv/fisheyeDistortPoints.cpp create mode 100644 src/+cv/fisheyeEstimateNewCameraMatrixForUndistortRectify.cpp create mode 100644 src/+cv/fisheyeInitUndistortRectifyMap.cpp create mode 100644 src/+cv/fisheyeProjectPoints.cpp create mode 100644 src/+cv/fisheyeStereoCalibrate.cpp create mode 100644 src/+cv/fisheyeStereoRectify.cpp create mode 100644 src/+cv/fisheyeUndistortImage.cpp create mode 100644 src/+cv/fisheyeUndistortPoints.cpp create mode 100644 test/unit_tests/TestFisheyeCalibrate.m create mode 100644 test/unit_tests/TestFisheyeDistortPoints.m create mode 100644 test/unit_tests/TestFisheyeEstimateNewCameraMatrixForUndistortRectify.m create mode 100644 test/unit_tests/TestFisheyeInitUndistortRectifyMap.m create mode 100644 test/unit_tests/TestFisheyeProjectPoints.m create mode 100644 test/unit_tests/TestFisheyeStereoCalibrate.m create mode 100644 test/unit_tests/TestFisheyeStereoRectify.m create mode 100644 test/unit_tests/TestFisheyeUndistortImage.m create mode 100644 test/unit_tests/TestFisheyeUndistortPoints.m diff --git a/+cv/fisheyeCalibrate.m b/+cv/fisheyeCalibrate.m new file mode 100644 index 000000000..43b8a67aa --- /dev/null +++ b/+cv/fisheyeCalibrate.m @@ -0,0 +1,56 @@ +%FISHEYECALIBRATE Performs camera calibaration (fisheye) +% +% [K, D, rms] = cv.fisheyeCalibrate(objectPoints, imagePoints, imageSize) +% [K, D, rms, rvecs, tvecs] = cv.fisheyeCalibrate(...) +% [...] = cv.fisheyeCalibrate(..., 'OptionName',optionValue, ...) +% +% ## Input +% * __objectPoints__ A cell array of cells of calibration pattern points in +% the calibration pattern coordinate space `{{[x,y,z], ..}, ...}`. +% * __imagePoints__ A cell array of cells of the projections of calibration +% pattern points `{{[x,y], ..}, ...}`. `numel(imagePoints)` and +% `numel(objectPoints)` must be equal, and `length(imagePoints{i})` must be +% equal to `length(objectPoints{i})` for each `i`. 
+% * __imageSize__ Size of the image used only to initialize the intrinsic +% camera matrix `[w,h]`. +% +% ## Output +% * __K__ Output 3x3 floating-point camera matrix `[fx 0 cx; 0 fy cy; 0 0 1]`. +% * __D__ Output vector of distortion coefficients `[k1, k2, k3, k4]`. +% * __rms__ the overall RMS re-projection error. +% * __rvecs__ Output cell array of rotation vectors (see cv.Rodrigues) +% estimated for each pattern view. That is, each k-th rotation vector +% together with the corresponding k-th translation vector (see the next +% output parameter description) brings the calibration pattern from the +% model coordinate space (in which object points are specified) to the world +% coordinate space, that is, a real position of the calibration pattern in +% the k-th pattern view (`k=1:M`). +% * __tvecs__ Output cell array of translation vectors estimated for each +% pattern view (cell array of 3-element vectors). +% +% ## Options +% * __CameraMatrix__ Input 3x3 camera matrix used as initial value for `K`. If +% `UseIntrinsicGuess` is specified, some or all of `fx`, `fy`, `cx`, `cy` +% must be initialized before calling the function. +% * __DistCoeffs__ Input 4 elements vector used as an initial values of `D`. +% * __UseIntrinsicGuess__ `K` contains valid initial values of `fx`, `fy`, +% `cx`, `cy` that are optimized further. Otherwise, `(cx,cy)` is initially +% set to the image center (`imageSize` is used), and focal distances are +% computed in a least-squares fashion. default false +% * __RecomputeExtrinsic__ Extrinsic will be recomputed after each iteration +% of intrinsic optimization. default false +% * __CheckCond__ The functions will check validity of condition number. +% default false +% * __FixSkew__ Skew coefficient (alpha) is set to zero and stay zero. +% default false +% * __FixK1__, ..., __FixK4__ Selected distortion coefficients are set to +% zeros and stay zero. default false +% * __FixPrincipalPoint__ The principal point is not changed during the global +% optimization. It stays at the center or at a different location specified +% when `UseIntrinsicGuess` is set too. default false +% * __Criteria__ Termination criteria for the iterative optimization algorithm. +% default `struct('type','Count+EPS', 'maxCount',100, 'epsilon',eps)` +% +% See also: cv.fisheyeStereoCalibrate, cv.fisheyeUndistortImage, +% cv.calibrateCamera +% diff --git a/+cv/fisheyeDistortPoints.m b/+cv/fisheyeDistortPoints.m new file mode 100644 index 000000000..23d0f1691 --- /dev/null +++ b/+cv/fisheyeDistortPoints.m @@ -0,0 +1,23 @@ +%FISHEYEDISTORTPOINTS Distorts 2D points using fisheye model +% +% distorted = cv.fisheyeDistortPoints(undistorted, K, D); +% [...] = cv.fisheyeDistortPoints(..., 'OptionName',optionValue, ...); +% +% ## Input +% * __undistorted__ Object points. A Nx2, 1xNx2, or Nx1x2 array, where N is +% the number of points in the view. +% * __K__ Camera matrix 3x3, `K = [fx 0 cx; 0 fy cy; 0 0 1]`. +% * __D__ Input vector of distortion coefficients `[k1,k2,k3,k4]`. +% +% ## Output +% * __distorted__ Output array of image points. +% +% ## Options +% * __Alpha__ The skew coefficient. default 0 +% +% Note that the function assumes the camera matrix of the undistorted points +% to be identity. This means if you want to transform back points undistorted +% with cv.fisheyeUndistortPoints you have to multiply them with `inv(P)`. 
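A hedged sketch of the inv(P) remark above; only the cv.fisheyeDistortPoints signature is documented in this patch, so the 'P' option of cv.fisheyeUndistortPoints and the point data are assumptions:

    und = cv.fisheyeUndistortPoints(pts, K, D, 'P',P);   % pts is Nx2, undistorted into P's frame
    h   = [und, ones(size(und,1),1)] / P';               % per row: (inv(P)*[x;y;1])', assuming P(3,:) = [0 0 1]
    red = cv.fisheyeDistortPoints(h(:,1:2), K, D);       % should approximately recover pts
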
+% +% See also: cv.fisheyeUndistortPoints +% diff --git a/+cv/fisheyeEstimateNewCameraMatrixForUndistortRectify.m b/+cv/fisheyeEstimateNewCameraMatrixForUndistortRectify.m new file mode 100644 index 000000000..df80c1001 --- /dev/null +++ b/+cv/fisheyeEstimateNewCameraMatrixForUndistortRectify.m @@ -0,0 +1,24 @@ +%FISHEYEESTIMATENEWCAMERAMATRIXFORUNDISTORTRECTIFY Estimates new camera matrix for undistortion or rectification (fisheye) +% +% P = cv.fisheyeEstimateNewCameraMatrixForUndistortRectify(K, D, imageSize) +% [...] = cv.fisheyeEstimateNewCameraMatrixForUndistortRectify(..., 'OptionName',optionValue, ...) +% +% ## Input +% * __K__ Camera matrix 3x3, `K = [fx 0 cx; 0 fy cy; 0 0 1]`. +% * __D__ Input vector of distortion coefficients `[k1,k2,k3,k4]`. +% * __imageSize__ Size of the image `[w,h]`. +% +% ## Output +% * __P__ New camera matrix (3x3). +% +% ## Options +% * __R__ Rectification transformation in the object space +% (3x3 matrix or 1x3/3x1 vector). +% * __Balance__ Sets the new focal length in range between the min focal +% length and the max focal length. Balance is in range of [0,1]. default 0 +% * __NewImageSize__ Image size after rectification `[w,h]`. By default, it is +% set to `imageSize`. +% * __FOVScale__ Divisor for new focal length. default 1.0 +% +% See also: cv.fisheyeInitUndistortRectifyMap, cv.getOptimalNewCameraMatrix +% diff --git a/+cv/fisheyeInitUndistortRectifyMap.m b/+cv/fisheyeInitUndistortRectifyMap.m new file mode 100644 index 000000000..9e096627b --- /dev/null +++ b/+cv/fisheyeInitUndistortRectifyMap.m @@ -0,0 +1,29 @@ +%FISHEYEINITUNDISTORTRECTIFYMAP Computes undistortion and rectification maps (fisheye) +% +% [map1, map2] = cv.fisheyeInitUndistortRectifyMap(K, D, siz) +% [...] = cv.fisheyeInitUndistortRectifyMap(..., 'OptionName',optionValue, ...) +% +% ## Input +% * __K__ Camera matrix 3x3, `K = [fx 0 cx; 0 fy cy; 0 0 1]`. +% * __D__ Input vector of distortion coefficients `[k1,k2,k3,k4]`. +% * __size__ Undistorted image size `[w,h]`. +% +% ## Output +% * __map1__ The first output map. See `M1Type`. +% * __map2__ The second output map. See `M1Type`. +% +% ## Options +% * __R__ Rectification transformation in the object space +% (3x3 matrix or 1x3/3x1 vector). +% * __P__ New camera matrix (3x3) or new projection matrix (3x4). +% * __M1Type__ Type of the first output map, default -1 (equivalent to +% `int16`). See cv.convertMaps for details. One of: +% * __int16__ (fixed-point representation). +% * __single1__ (separate floating-point representation). +% +% The function computes undistortion and rectification maps for image +% transform by cv.remap. If `D` is empty zero distortion is used, if `R` or +% `P` is empty identity matrixes are used. +% +% See also: cv.initUndistortRectifyMap, cv.remap, cv.convertMaps +% diff --git a/+cv/fisheyeProjectPoints.m b/+cv/fisheyeProjectPoints.m new file mode 100644 index 000000000..1c3771092 --- /dev/null +++ b/+cv/fisheyeProjectPoints.m @@ -0,0 +1,38 @@ +%FISHEYEPROJECTPOINTS Projects points using fisheye model +% +% imagePoints = cv.fisheyeProjectPoints(objectPoints, rvec, tvec, K) +% [imagePoints, jacobian] = cv.fisheyeProjectPoints(...) +% [...] = cv.fisheyeProjectPoints(..., 'OptionName', optionValue, ...) +% +% ## Input +% * __objectPoints__ Array of object points, Nx3/Nx1x3/1xNx3 array or cell +% array of 3-element vectors `{[x,y,z],...}`, where `N` is the number of +% points in the view. +% * __rvec__ Rotation vector or matrix (3x1/1x3 or 3x3). See cv.Rodrigues for +% details. 
+% * __tvec__ Translation vector (3x1/1x3). +% * __K__ Camera matrix 3x3, `K = [fx 0 cx; 0 fy cy; 0 0 1]`. +% +% ## Output +% * __imagePoints__ Output array of image points, Nx2/Nx1x2/1xNx2 array or +% cell array of 2-element vectors `{[x,y], ...}`. +% * __jacobian__ Optional output `(2N)x(2+2+4+3+3+1)` jacobian matrix of +% derivatives of image points with respect to components of the +% focal lengths (2), coordinates of the principal point (2), distortion +% coefficients (4), rotation vector (3), translation vector (3), and the +% skew (1). +% +% ## Options +% * __DistCoeffs__ Input vector of distortion coefficients `[k1,k2,k3,k4]`. If +% the vector is empty, the zero distortion coefficients are assumed. +% default empty +% * __Alpha__ The skew coefficient. default 0 +% +% The function computes projections of 3D points to the image plane given +% intrinsic and extrinsic camera parameters. Optionally, the function computes +% Jacobians - matrices of partial derivatives of image points coordinates +% (as functions of all the input parameters) with respect to the particular +% parameters, intrinsic and/or extrinsic. +% +% See also: cv.projectPoints +% diff --git a/+cv/fisheyeStereoCalibrate.m b/+cv/fisheyeStereoCalibrate.m new file mode 100644 index 000000000..b54b1d70f --- /dev/null +++ b/+cv/fisheyeStereoCalibrate.m @@ -0,0 +1,58 @@ +%FISHEYESTEREOCALIBRATE Performs stereo calibration (fisheye) +% +% S = cv.fisheyeStereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize) +% [...] = cv.fisheyeStereoCalibrate(..., 'OptionName',optionValue, ...) +% +% ## Input +% * __objectPoints__ A cell array of cells of the calibration pattern points +% `{{[x,y,z], ..}, ...}`. +% * __imagePoints1__ A cell array of cells of the projections of the +% calibration pattern points `{{[x,y], ..}, ...}`, observed by the first +% camera. +% * __imagePoints2__ A cell array of cells of the projections of the +% calibration pattern points `{{[x,y], ..}, ...}`, observed by the second +% camera. +% * __imageSize__ Size of the image used only to initialize intrinsic camera +% matrix `[w,h]`. +% +% ## Output +% * __S__ scalar struct having the following fields: +% * __cameraMatrix1__ Output first camera matrix +% `[fx1 0 cx1; 0 fy1 cy1; 0 0 1]`. +% * __distCoeffs1__ Output vector of distortion coefficients +% `[k1, k2, k3, k4]` of 4 elements. +% * __cameraMatrix2__ Output second camera matrix +% `[fx2 0 cx2; 0 fy2 cy2; 0 0 1]`. The parameter is similar to `K1`. +% * __distCoeffs2__ Output lens distortion coefficients for the second +% camera. The parameter is similar to `D1`. +% * __R__ Output 3x3 rotation matrix between the 1st and the 2nd camera +% coordinate systems. +% * __T__ Output 3x1 translation vector between the coordinate systems of +% the cameras. +% * __reprojErr__ output final re-projection error (scalar). +% +% ## Options +% * __CameraMatrix1__, __CameraMatrix2__ Initial camera matrices. If any of +% `UseIntrinsicGuess`, `FixIntrinsic` (default) are specified, some or all +% of the matrix components must be initialized. See the flags description +% for details. +% * __DistCoeffs1__, __DistCoeffs2__ Initial lens distortion coefficients. +% * __FixIntrinsic__ Fix `K1`, `K2` and `D1`, `D2` so that only `R`, `T` +% matrices are estimated. default true +% * __UseIntrinsicGuess__ `K1`, `K2` contains valid initial values of `fx`, +% `fy`, `cx`, `cy` that are optimized further. 
Otherwise, `(cx,cy)` is +% initially set to the image center (`imageSize` is used), and focal +% distances are computed in a least-squares fashion. default false +% * __RecomputeExtrinsic__ Extrinsic will be recomputed after each iteration +% of intrinsic optimization. default false +% * __CheckCond__ The functions will check validity of condition number. +% default false +% * __FixSkew__ Skew coefficient (alpha) is set to zero and stay zero. +% default false +% * __FixK1__, ..., __FixK4__ Selected distortion coefficients are set to +% zeros and stay zero. default false +% * __Criteria__ Termination criteria for the iterative optimization algorithm. +% default `struct('type','Count+EPS', 'maxCount',100, 'epsilon',eps)` +% +% See also: cv.fisheyeCalibrate, cv.fisheyeStereoRectify, cv.stereoCalibrate +% diff --git a/+cv/fisheyeStereoRectify.m b/+cv/fisheyeStereoRectify.m new file mode 100644 index 000000000..6a04c37de --- /dev/null +++ b/+cv/fisheyeStereoRectify.m @@ -0,0 +1,44 @@ +%FISHEYESTEREORECTIFY Stereo rectification for fisheye camera model +% +% S = cv.fisheyeStereoRectify(K1, D1, K2, D2, imageSize, R, T) +% [...] = cv.fisheyeStereoRectify(..., 'OptionName',optionValue, ...) +% +% ## Input +% * __K1__ First camera matrix 3x3. +% * __D1__ First camera distortion parameters of 4 elements. +% * __K2__ Second camera matrix 3x3. +% * __D2__ Second camera distortion parameters of 4 elements. +% * __imageSize__ Size of the image used for stereo calibration `[w,h]`. +% * __R__ Rotation matrix between the coordinate systems of the first and the +% second cameras, 3x3/3x1 (see cv.Rodrigues). +% * __T__ Translation vector between coordinate systems of the cameras, 3x1. +% +% ## Output +% * __S__ scalar struct having the following fields: +% * __R1__ 3x3 rectification transform (rotation matrix) for the first +% camera. +% * __R2__ 3x3 rectification transform (rotation matrix) for the second +% camera. +% * __P1__ 3x4 projection matrix in the new (rectified) coordinate systems +% for the first camera. +% * __P2__ 3x4 projection matrix in the new (rectified) coordinate systems +% for the second camera. +% * __Q__ 4x4 disparity-to-depth mapping matrix (see cv.reprojectImageTo3D). +% +% ## Options +% * __ZeroDisparity__ If the flag is set, the function makes the principal +% points of each camera have the same pixel coordinates in the rectified +% views. And if the flag is not set, the function may still shift the images +% in the horizontal or vertical direction (depending on the orientation of +% epipolar lines) to maximize the useful image area. default true +% * __NewImageSize__ New image resolution after rectification. The same size +% should be passed to cv.fisheyeInitUndistortRectifyMap. When [0,0] is +% passed (default), it is set to the original `imageSize`. Setting it to +% larger value can help you preserve details in the original image, +% especially when there is a big radial distortion. +% * __Balance__ Sets the new focal length in range between the min focal +% length and the max focal length. Balance is in range of [0,1]. default 0 +% * __FOVScale__ Divisor for new focal length. 
default 1.0 +% +% See also: cv.fisheyeStereoCalibrate, cv.stereoRectify +% diff --git a/+cv/fisheyeUndistortImage.m b/+cv/fisheyeUndistortImage.m new file mode 100644 index 000000000..b3462096c --- /dev/null +++ b/+cv/fisheyeUndistortImage.m @@ -0,0 +1,45 @@ +%FISHEYEUNDISTORTIMAGE Transforms an image to compensate for fisheye lens distortion +% +% undistorted = cv.fisheyeUndistortImage(distorted, K, D) +% undistorted = cv.fisheyeUndistortImage(..., 'OptionName',optionValue, ...) +% +% ## Input +% * __distorted__ image with fisheye lens distortion. +% * __K__ Camera matrix 3x3, `K = [fx 0 cx; 0 fy cy; 0 0 1]`. +% * __D__ Input vector of distortion coefficients `[k1,k2,k3,k4]`. +% +% ## Output +% * __undistorted__ Output image with compensated fisheye lens distortion. +% +% ## Options +% * __NewCameraMatrix__ Camera matrix of the distorted image. By default, it +% is the identity matrix but you may additionally scale and shift the result +% by using a different matrix. +% * __NewImageSize__ Image size after rectification `[w,h]`. By default, it is +% set to input image size. +% +% The function transforms an image to compensate radial and tangential lens +% distortion. +% +% The function is simply a combination of cv.fisheyeInitUndistortRectifyMap +% (with unity `R`) and cv.remap (with bilinear interpolation). See the former +% function for details of the transformation being performed. +% +% See below the results of cv.fisheyeUndistortImage: +% +% * a) result of cv.undistort of perspective camera model (all possible +% coefficients `[k1, k2, k3, k4, k5, k6]` of distortion were optimized under +% calibration) +% * b) result of cv.fisheyeUndistortImage of fisheye camera model (all +% possible coefficients `[k1, k2, k3, k4]` of fisheye distortion were +% optimized under calibration) +% * c) original image was captured with fisheye lens +% +% Pictures a) and b) almost the same. But if we consider points of image +% located far from the center of image, we can notice that on image a) these +% points are distorted. +% +% ![image](https://docs.opencv.org/3.3.1/fisheye_undistorted.jpg) +% +% See also: cv.fisheyeInitUndistortRectifyMap, cv.remap, cv.undistort +% diff --git a/+cv/fisheyeUndistortPoints.m b/+cv/fisheyeUndistortPoints.m new file mode 100644 index 000000000..ae816a984 --- /dev/null +++ b/+cv/fisheyeUndistortPoints.m @@ -0,0 +1,21 @@ +%FISHEYEUNDISTORTPOINTS Undistorts 2D points using fisheye model +% +% undistorted = cv.fisheyeUndistortPoints(distorted, K, D); +% [...] = cv.fisheyeUndistortPoints(..., 'OptionName',optionValue, ...); +% +% ## Input +% * __distorted__ Object points. An Nx2, 1xNx2, or Nx1x2 array, where N is the +% number of points in the view. +% * __K__ Camera matrix 3x3, `K = [fx 0 cx; 0 fy cy; 0 0 1]`. +% * __D__ Input vector of distortion coefficients `[k1,k2,k3,k4]`. +% +% ## Output +% * __undistorted__ Output array of image points. +% +% ## Options +% * __R__ Rectification transformation in the object space +% (3x3 matrix or 1x3/3x1 vector). +% * __P__ New camera matrix (3x3) or new projection matrix (3x4). 
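+%
+% ## Example
+% A minimal usage sketch; the camera matrix, distortion coefficients and
+% pixel coordinates below are illustrative placeholders only:
+%
+%     K = [300 0 320; 0 300 240; 0 0 1];  % assumed camera matrix
+%     D = [-0.02 0.01 0 0];               % assumed fisheye coefficients
+%     dpts = [400 250; 100 330];          % distorted pixel coordinates
+%     % pass 'P',K to get pixel coordinates back; omit it for normalized output
+%     upts = cv.fisheyeUndistortPoints(dpts, K, D, 'P',K);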
+% +% See also: cv.fisheyeUndistortImage, cv.undistortPoints +% diff --git a/src/+cv/fisheyeCalibrate.cpp b/src/+cv/fisheyeCalibrate.cpp new file mode 100644 index 000000000..d82070298 --- /dev/null +++ b/src/+cv/fisheyeCalibrate.cpp @@ -0,0 +1,81 @@ +/** + * @file fisheyeCalibrate.cpp + * @brief mex interface for cv::fisheye::calibrate + * @ingroup calib3d + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +using namespace std; +using namespace cv; + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=5); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + Mat K, D; + int flags = 0; + TermCriteria criteria(TermCriteria::COUNT+TermCriteria::EPS, 100, DBL_EPSILON); + for (int i=3; i > objectPoints(MxArrayToVectorVectorPoint3(rhs[0])); + vector > imagePoints(MxArrayToVectorVectorPoint(rhs[1])); + Size imageSize(rhs[2].toSize()); + vector rvecs, tvecs; + double rms = fisheye::calibrate( + objectPoints, imagePoints, imageSize, K, D, + (nlhs>3 ? rvecs : noArray()), + (nlhs>4 ? tvecs : noArray()), + flags, criteria); + plhs[0] = MxArray(K); + if (nlhs > 1) + plhs[1] = MxArray(D); + if (nlhs > 2) + plhs[2] = MxArray(rms); + if (nlhs > 3) + plhs[3] = MxArray(rvecs); + if (nlhs > 4) + plhs[4] = MxArray(tvecs); +} diff --git a/src/+cv/fisheyeDistortPoints.cpp b/src/+cv/fisheyeDistortPoints.cpp new file mode 100644 index 000000000..c2526bf46 --- /dev/null +++ b/src/+cv/fisheyeDistortPoints.cpp @@ -0,0 +1,47 @@ +/** + * @file fisheyeDistortPoints.cpp + * @brief mex interface for cv::fisheye::distortPoints + * @ingroup calib3d + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +using namespace std; +using namespace cv; + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + double alpha = 0; + for (int i=3; i=3 && (nrhs%2)==1 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + Mat R; + double balance = 0.0; + Size newImageSize; + double fov_scale = 1.0; + for (int i=3; i M1Type = ConstMap + ("int16", CV_16SC2) + ("single1", CV_32FC1) + ("single2", CV_32FC2); +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=2); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + Mat R, P; + int m1type = -1; + for (int i=3; i1) + plhs[1] = MxArray(map2); +} diff --git a/src/+cv/fisheyeProjectPoints.cpp 
b/src/+cv/fisheyeProjectPoints.cpp new file mode 100644 index 000000000..d8703dff3 --- /dev/null +++ b/src/+cv/fisheyeProjectPoints.cpp @@ -0,0 +1,66 @@ +/** + * @file fisheyeProjectPoints.cpp + * @brief mex interface for cv::fisheye::projectPoints + * @ingroup calib3d + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +using namespace std; +using namespace cv; + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=2); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + Mat D; + double alpha = 0; + for (int i=4; i1 ? jacobian : noArray())); + if (objectPoints.channels() == 1 && objectPoints.cols == 3) + imagePoints = imagePoints.reshape(1,0); // Nx2 + plhs[0] = MxArray(imagePoints); + } + else if (rhs[0].isCell()) { + vector objectPoints(rhs[0].toVector()); + vector imagePoints; + fisheye::projectPoints(objectPoints, imagePoints, + rvec, tvec, K, D, alpha, (nlhs>1 ? jacobian : noArray())); + plhs[0] = MxArray(imagePoints); + } + else + mexErrMsgIdAndTxt("mexopencv:error", "Invalid points argument"); + if (nlhs>1) + plhs[1] = MxArray(jacobian); +} diff --git a/src/+cv/fisheyeStereoCalibrate.cpp b/src/+cv/fisheyeStereoCalibrate.cpp new file mode 100644 index 000000000..7abafe535 --- /dev/null +++ b/src/+cv/fisheyeStereoCalibrate.cpp @@ -0,0 +1,103 @@ +/** + * @file fisheyeStereoCalibrate.cpp + * @brief mex interface for cv::fisheye::stereoCalibrate + * @ingroup calib3d + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +using namespace std; +using namespace cv; + +namespace { +/** Create a new MxArray from stereo calibration results. + * @param K1 First camera matrix. + * @param D1 Distortion coefficients of first camera. + * @param K2 Second camera matrix. + * @param D2 Distortion coefficients of second camera. + * @param R Rotation matrix between the cameras coordinate systems. + * @param T Translation vector between the cameras coordinate systems. + * @param rms Re-projection error. + * @return output MxArray struct object. 
+ */ +MxArray toStruct(const Mat& K1, const Mat& D1, const Mat& K2, const Mat& D2, + const Mat& R, const Mat& T, double rms) +{ + const char* fieldnames[] = {"cameraMatrix1", "distCoeffs1", + "cameraMatrix2", "distCoeffs2", "R", "T", "reprojErr"}; + MxArray s = MxArray::Struct(fieldnames, 7); + s.set("cameraMatrix1", K1); + s.set("distCoeffs1", D1); + s.set("cameraMatrix2", K2); + s.set("distCoeffs2", D2); + s.set("R", R); + s.set("T", T); + s.set("reprojErr", rms); + return s; +} +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + Mat K1, D1, K2, D2; + int flags = cv::CALIB_FIX_INTRINSIC; + TermCriteria criteria(TermCriteria::COUNT+TermCriteria::EPS, 100, DBL_EPSILON); + for (int i=4; i > objectPoints(MxArrayToVectorVectorPoint3(rhs[0])); + vector > imagePoints1(MxArrayToVectorVectorPoint(rhs[1])); + vector > imagePoints2(MxArrayToVectorVectorPoint(rhs[2])); + Size imageSize(rhs[3].toSize()); + Mat R, T; + double rms = fisheye::stereoCalibrate(objectPoints, imagePoints1, imagePoints2, + K1, D1, K2, D2, imageSize, R, T, flags, criteria); + plhs[0] = toStruct(K1, D1, K2, D2, R, T, rms); +} diff --git a/src/+cv/fisheyeStereoRectify.cpp b/src/+cv/fisheyeStereoRectify.cpp new file mode 100644 index 000000000..cffd1bc83 --- /dev/null +++ b/src/+cv/fisheyeStereoRectify.cpp @@ -0,0 +1,82 @@ +/** + * @file fisheyeStereoRectify.cpp + * @brief mex interface for cv::fisheye::stereoRectify + * @ingroup calib3d + * @author Amro + * @date 2017 + */ +#include "mexopencv.hpp" +using namespace std; +using namespace cv; + +namespace { +/** Create a new MxArray from stereo rectified transforms. + * @param R1 Rectification transform for the first camera. + * @param R2 Rectification transform for the second camera. + * @param P1 Projection matrix in new coord systems of first camera. + * @param P2 Projection matrix in new coord systems of second camera. + * @param Q Disparity-to-depth mapping matrix. + * @return output MxArray struct object. 
+ */ +MxArray toStruct(const Mat& R1, const Mat& R2, const Mat& P1, const Mat& P2, + const Mat& Q) +{ + const char* fieldnames[] = {"R1", "R2", "P1", "P2", "Q"}; + MxArray s = MxArray::Struct(fieldnames, 5); + s.set("R1", R1); + s.set("R2", R2); + s.set("P1", P1); + s.set("P2", P2); + s.set("Q", Q); + return s; +} +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=7 && (nrhs%2)==1 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + int flags = cv::CALIB_ZERO_DISPARITY; + Size newImageSize; + double balance = 0.0; + double fov_scale = 1.0; + for (int i=7; i=3 && (nrhs%2)==1 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + Mat Knew; + Size new_size; + for (int i=3; i=3 && (nrhs%2)==1 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + Mat R, P; + for (int i=3; i Date: Sun, 28 Jan 2018 16:05:56 +0200 Subject: [PATCH 07/36] calib3d: wrong nargout check (fix bug #381) --- src/+cv/recoverPose.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/+cv/recoverPose.cpp b/src/+cv/recoverPose.cpp index fd90f1c85..b40eb3faf 100644 --- a/src/+cv/recoverPose.cpp +++ b/src/+cv/recoverPose.cpp @@ -19,7 +19,7 @@ using namespace cv; void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Check the number of arguments - nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=4); + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=5); // Argument vector vector rhs(prhs, prhs+nrhs); From 4aaa410206414c3e43158cca9b5a0d4157ee277f Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 16:33:22 +0200 Subject: [PATCH 08/36] imgproc: new resize interpolation method --- +cv/resize.m | 1 + include/mexopencv.hpp | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/+cv/resize.m b/+cv/resize.m index 4a85d0ae4..c4147f609 100644 --- a/+cv/resize.m +++ b/+cv/resize.m @@ -26,6 +26,7 @@ % method for image decimation, as it gives moire-free results. But when % the image is zoomed, it is similar to the 'Nearest' method. % * __Lanczos4__ a Lanczos interpolation over 8x8 pixel neighborhood +% * __LinearExact__ a bit exact bilinear interpolation % % The function cv.resize resizes the image `src` down to or up to the % specified size. 
The size and type of `dst` are derived from `src`, `dsize`, diff --git a/include/mexopencv.hpp b/include/mexopencv.hpp index c0d4a80b0..43192864a 100644 --- a/include/mexopencv.hpp +++ b/include/mexopencv.hpp @@ -83,11 +83,12 @@ const ConstMap BorderTypeInv = ConstMap /// Interpolation type map for option processing const ConstMap InterpType = ConstMap - ("Nearest", cv::INTER_NEAREST) // nearest neighbor interpolation - ("Linear", cv::INTER_LINEAR) // bilinear interpolation - ("Cubic", cv::INTER_CUBIC) // bicubic interpolation - ("Area", cv::INTER_AREA) // area-based (or super) interpolation - ("Lanczos4", cv::INTER_LANCZOS4); // Lanczos interpolation over 8x8 neighborhood + ("Nearest", cv::INTER_NEAREST) // nearest neighbor interpolation + ("Linear", cv::INTER_LINEAR) // bilinear interpolation + ("Cubic", cv::INTER_CUBIC) // bicubic interpolation + ("Area", cv::INTER_AREA) // area-based (or super) interpolation + ("Lanczos4", cv::INTER_LANCZOS4) // Lanczos interpolation over 8x8 neighborhood + ("LinearExact", cv::INTER_LINEAR_EXACT); // Bit exact bilinear interpolation /// Thresholding type map for option processing const ConstMap ThreshType = ConstMap From d768c8a662d16702e4ca74ce0e397c6506fbe56b Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 16:44:11 +0200 Subject: [PATCH 09/36] imgcodecs: new EXR flags in imwrite also EXR encoder now only accepts CV_32F images --- +cv/imencode.m | 4 ++++ +cv/imwrite.m | 4 ++++ src/+cv/imencode.cpp | 12 +++++++++++- src/+cv/imwrite.cpp | 12 +++++++++++- src/+cv/private/VideoWriter_.cpp | 13 ++++++++++++- test/unit_tests/TestImencode.m | 8 ++++++-- test/unit_tests/TestImwrite.m | 8 +++++++- 7 files changed, 55 insertions(+), 6 deletions(-) diff --git a/+cv/imencode.m b/+cv/imencode.m index ceb0bacd7..e7dbcf4eb 100644 --- a/+cv/imencode.m +++ b/+cv/imencode.m @@ -59,6 +59,10 @@ % bytes as small as possible. default is false. % * __PxmBinary__ For PPM, PGM, or PBM, it can be a binary format flag, 0 or 1, % to specify ASCII or binary encoding. default is true. +% * __ExrType__ override EXR storage type. Note that the EXR encoder only +% accepts `single` images. One of: +% * __Half__ store as half precision (FP16), see cv.convertFp16 +% * __Float__ store as single precision (FP32), this is the default. % * __WebpQuality__ For WEBP, it can be a quality from 1 to 100 (the higher is % the better). By default (without any parameter) and for quality above 100 % the lossless compression is used. diff --git a/+cv/imwrite.m b/+cv/imwrite.m index 281360131..2f8f53088 100644 --- a/+cv/imwrite.m +++ b/+cv/imwrite.m @@ -56,6 +56,10 @@ % bytes as small as possible. default is false. % * __PxmBinary__ For PPM, PGM, or PBM, it can be a binary format flag, 0 or 1, % to specify ASCII or binary encoding. default is true. +% * __ExrType__ override EXR storage type. Note that the EXR encoder only +% accepts `single` images. One of: +% * __Half__ store as half precision (FP16), see cv.convertFp16 +% * __Float__ store as single precision (FP32), this is the default. % * __WebpQuality__ For WEBP, it can be a quality from 1 to 100 (the higher is % the better). By default (without any parameter) and for quality above 100 % the lossless compression is used. 
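As an illustration of the new `ExrType` option (a sketch only; the output path
and image values are placeholders, and note that the EXR encoder only accepts
`single` images):

    img = rand(480, 640, 3, 'single');                        % HDR-like data
    cv.imwrite(fullfile(tempdir(), 'out.exr'), img, 'ExrType','Half');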
diff --git a/src/+cv/imencode.cpp b/src/+cv/imencode.cpp index cbb88141b..702d447bd 100644 --- a/src/+cv/imencode.cpp +++ b/src/+cv/imencode.cpp @@ -18,7 +18,13 @@ const ConstMap PngStrategyMap = ConstMap ("RLE", cv::IMWRITE_PNG_STRATEGY_RLE) ("Fixed", cv::IMWRITE_PNG_STRATEGY_FIXED); -/// PAM tupletypes for option processing +/// EXR storage types for option processing +const ConstMap ExrTypeMap = ConstMap + //("Int", cv::IMWRITE_EXR_TYPE_UNIT) + ("Half", cv::IMWRITE_EXR_TYPE_HALF) + ("Float", cv::IMWRITE_EXR_TYPE_FLOAT); + +/// PAM tuple types for option processing const ConstMap PamFormatMap = ConstMap ("Null", cv::IMWRITE_PAM_FORMAT_NULL) ("BlackWhite", cv::IMWRITE_PAM_FORMAT_BLACKANDWHITE) @@ -88,6 +94,10 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) params.push_back(cv::IMWRITE_PXM_BINARY); params.push_back(rhs[i+1].toBool() ? 1 : 0); } + else if (key == "ExrType") { + params.push_back(cv::IMWRITE_EXR_TYPE); + params.push_back(ExrTypeMap[rhs[i+1].toString()]); + } else if (key == "WebpQuality") { params.push_back(cv::IMWRITE_WEBP_QUALITY); params.push_back(rhs[i+1].toInt()); diff --git a/src/+cv/imwrite.cpp b/src/+cv/imwrite.cpp index 6ae104929..8bf47a3be 100644 --- a/src/+cv/imwrite.cpp +++ b/src/+cv/imwrite.cpp @@ -18,7 +18,13 @@ const ConstMap PngStrategyMap = ConstMap ("RLE", cv::IMWRITE_PNG_STRATEGY_RLE) ("Fixed", cv::IMWRITE_PNG_STRATEGY_FIXED); -/// PAM tupletypes for option processing +/// EXR storage types for option processing +const ConstMap ExrTypeMap = ConstMap + //("Int", cv::IMWRITE_EXR_TYPE_UNIT) + ("Half", cv::IMWRITE_EXR_TYPE_HALF) + ("Float", cv::IMWRITE_EXR_TYPE_FLOAT); + +/// PAM tuple types for option processing const ConstMap PamFormatMap = ConstMap ("Null", cv::IMWRITE_PAM_FORMAT_NULL) ("BlackWhite", cv::IMWRITE_PAM_FORMAT_BLACKANDWHITE) @@ -88,6 +94,10 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) params.push_back(cv::IMWRITE_PXM_BINARY); params.push_back(rhs[i+1].toBool() ? 1 : 0); } + else if (key == "ExrType") { + params.push_back(cv::IMWRITE_EXR_TYPE); + params.push_back(ExrTypeMap[rhs[i+1].toString()]); + } else if (key == "WebpQuality") { params.push_back(cv::IMWRITE_WEBP_QUALITY); params.push_back(rhs[i+1].toInt()); diff --git a/src/+cv/private/VideoWriter_.cpp b/src/+cv/private/VideoWriter_.cpp index e9679d5df..71f4c41e6 100644 --- a/src/+cv/private/VideoWriter_.cpp +++ b/src/+cv/private/VideoWriter_.cpp @@ -44,7 +44,13 @@ const ConstMap PngStrategyMap = ConstMap ("RLE", cv::IMWRITE_PNG_STRATEGY_RLE) ("Fixed", cv::IMWRITE_PNG_STRATEGY_FIXED); -/// PAM tupletypes for option processing +/// EXR storage types for option processing +const ConstMap ExrTypeMap = ConstMap + //("Int", cv::IMWRITE_EXR_TYPE_UNIT) + ("Half", cv::IMWRITE_EXR_TYPE_HALF) + ("Float", cv::IMWRITE_EXR_TYPE_FLOAT); + +/// PAM tuple types for option processing const ConstMap PamFormatMap = ConstMap ("Null", cv::IMWRITE_PAM_FORMAT_NULL) ("BlackWhite", cv::IMWRITE_PAM_FORMAT_BLACKANDWHITE) @@ -168,6 +174,11 @@ struct ImwriteOptionsParser cv::IMWRITE_PXM_BINARY); params.push_back(val.toBool() ? 
1 : 0); } + else if (key == "ExrType") { + params.push_back(cv::CAP_PROP_IMAGES_BASE + + cv::IMWRITE_EXR_TYPE); + params.push_back(ExrTypeMap[val.toString()]); + } else if (key == "WebpQuality") { params.push_back(cv::CAP_PROP_IMAGES_BASE + cv::IMWRITE_WEBP_QUALITY); diff --git a/test/unit_tests/TestImencode.m b/test/unit_tests/TestImencode.m index 5c5157590..cbc26ff2a 100644 --- a/test/unit_tests/TestImencode.m +++ b/test/unit_tests/TestImencode.m @@ -6,8 +6,12 @@ frmts = TestImwrite.getFormats(); for i=1:numel(frmts) try - buf = cv.imencode(frmts(i).ext, TestImwrite.im, ... - frmts(i).opts{:}); + if strcmp(frmts(i).ext, '.exr') + img = single(TestImwrite.im) / 255; + else + img = TestImwrite.im; + end + buf = cv.imencode(frmts(i).ext, img, frmts(i).opts{:}); validateattributes(buf, {'uint8'}, {'vector', 'nonempty'}); catch ME %TODO: some codecs are not available on all platforms diff --git a/test/unit_tests/TestImwrite.m b/test/unit_tests/TestImwrite.m index aa9d0e15a..a07a59a71 100644 --- a/test/unit_tests/TestImwrite.m +++ b/test/unit_tests/TestImwrite.m @@ -12,7 +12,12 @@ filename = [tempname() frmts(i).ext]; cObj = onCleanup(@() TestImwrite.deleteFile(filename)); try - cv.imwrite(filename, TestImwrite.im, frmts(i).opts{:}); + if strcmp(frmts(i).ext, '.exr') + img = single(TestImwrite.im) / 255; + else + img = TestImwrite.im; + end + cv.imwrite(filename, img, frmts(i).opts{:}); assert(exist(filename,'file')==2, ... 'Failed to write %s', frmts(i).name); catch ME @@ -110,6 +115,7 @@ function deleteFile(fname) frmts(8).ext = '.jp2'; frmts(9).name = 'OpenEXR'; frmts(9).ext = '.exr'; + frmts(9).opts = {'ExrType','Float'}; frmts(10).name = 'Radiance HDR'; frmts(10).ext = '.hdr'; frmts(11).name = 'PAM'; From 55df23547759334f7824adfa75e581678a772f18 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 16:47:38 +0200 Subject: [PATCH 10/36] features2d: add note about a bug in save method --- src/+cv/private/DescriptorExtractor_.cpp | 2 ++ src/+cv/private/FeatureDetector_.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/+cv/private/DescriptorExtractor_.cpp b/src/+cv/private/DescriptorExtractor_.cpp index f9375137b..3883d09e0 100644 --- a/src/+cv/private/DescriptorExtractor_.cpp +++ b/src/+cv/private/DescriptorExtractor_.cpp @@ -96,6 +96,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) //*/ } else if (method == "save") { + //TODO: crashes due to bug in opencv + // (getDefaultName contains "." which is not allowed in serialized mapping name) nargchk(nrhs==3 && nlhs==0); obj->save(rhs[2].toString()); } diff --git a/src/+cv/private/FeatureDetector_.cpp b/src/+cv/private/FeatureDetector_.cpp index 11cdef09d..976f9db7f 100644 --- a/src/+cv/private/FeatureDetector_.cpp +++ b/src/+cv/private/FeatureDetector_.cpp @@ -96,6 +96,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) //*/ } else if (method == "save") { + //TODO: crashes due to bug in opencv + // (getDefaultName contains "." 
which is not allowed in serialized mapping name) nargchk(nrhs==3 && nlhs==0); obj->save(rhs[2].toString()); } From 3906426062d5682b833ca496ff968fcd81db4007 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 28 Jan 2018 17:11:26 +0200 Subject: [PATCH 11/36] features2d: fix KeyPoint::convert default values --- +cv/KeyPointsFilter.m | 8 ++++---- src/+cv/private/KeyPointsFilter_.cpp | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/+cv/KeyPointsFilter.m b/+cv/KeyPointsFilter.m index aa8f80ba0..f1ef7a169 100644 --- a/+cv/KeyPointsFilter.m +++ b/+cv/KeyPointsFilter.m @@ -165,12 +165,12 @@ % feature detection algorithm like SIFT/SURF/ORB. % % ## Options - % * __Size__ keypoint diameter. + % * __Size__ keypoint diameter. default 1.0 % * __Response__ keypoint detector response on the keypoint (that - % is, strength of the keypoint). + % is, strength of the keypoint). default 1.0 % * __Octave__ pyramid octave in which the keypoint has been - % detected. - % * __ClassId__ object id. + % detected. default 0 + % * __ClassId__ object id. default -1 % % See also: cv.KeyPointsFilter.convertToPoints % diff --git a/src/+cv/private/KeyPointsFilter_.cpp b/src/+cv/private/KeyPointsFilter_.cpp index c2d5b8496..449bbe0cd 100644 --- a/src/+cv/private/KeyPointsFilter_.cpp +++ b/src/+cv/private/KeyPointsFilter_.cpp @@ -88,7 +88,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1); float size = 1.0f; float response = 1.0f; - int octave = 1; + int octave = 0; int class_id = -1; for (int i=2; i Date: Wed, 31 Jan 2018 00:06:19 +0200 Subject: [PATCH 12/36] ml: update ANN - new training method: simulated annealing - new ReLU activation functions --- +cv/ANN_MLP.m | 66 ++++++++++++++++++++++--- src/+cv/private/ANN_MLP_.cpp | 94 ++++++++++++++++++++++-------------- 2 files changed, 118 insertions(+), 42 deletions(-) diff --git a/+cv/ANN_MLP.m b/+cv/ANN_MLP.m index 385bdebd3..4e8294d5c 100644 --- a/+cv/ANN_MLP.m +++ b/+cv/ANN_MLP.m @@ -120,7 +120,9 @@ % Default 'RProp'. Possible values are: % % * __Backprop__ The back-propagation algorithm. - % * __RProp__ (default) The RPROP algorithm. See [101] for details. + % * __RProp__ (default) The RPROP algorithm. See [RPROP93] for details. + % * __Anneal__ The simulated annealing algorithm. See [Kirkpatrick83] + % for details. % % See also: cv.ANN_MLP.setTrainMethod TrainMethod @@ -178,6 +180,16 @@ % % It must be >1. Default value is 50. RpropDWMax + % ANNEAL: initial temperature. It must be >= 0. Default value is 10.0. + AnnealInitialT + % ANNEAL: final temperature. It must be >= 0 and less than + % `AnnealInitialT`. Default value is 0.1. + AnnealFinalT + % ANNEAL: cooling ratio. It must be > 0 and less than 1. Default value + % is 0.95. + AnnealCoolingRatio + % ANNEAL: iteration per step. It must be > 0. Default value is 10. + AnnealItePerStep end properties (Dependent, GetAccess = private) @@ -591,22 +603,31 @@ function setTrainMethod(this, method, varargin) % ## Input % * __method__ Available training methods: % * __Backprop__ The back-propagation algorithm. - % * __RProp__ (default) The RPROP algorithm. See [101] for + % * __RProp__ (default) The RPROP algorithm. See [RPROP93] for % details. + % * __Anneal__ The simulated annealing algorithm. See + % [Kirkpatrick83] for details. % % ## Options % * __Param1__ sets `RpropDW0` property for 'RProp' and sets - % `BackpropWeightScale` property for 'Backprop'. 
default 0 + % `BackpropWeightScale` property for 'Backprop' and sets + % `AnnealInitialT` for `Anneal`. default 0 % * __Param2__ sets `RpropDWMin` property for 'RProp' and sets - % `BackpropMomentumScale` property for 'Backprop'. default 0 + % `BackpropMomentumScale` property for 'Backprop' and sets + % `AnnealFinalT` for `Anneal`. default 0 % % ## References - % [101]: + % [RPROP93]: % > Martin Riedmiller and Heinrich Braun. "A direct adaptive method % > for faster backpropagation learning: The rprop algorithm". % > In Neural Networks, 1993., IEEE International Conference on, % > pages 586-591. IEEE, 1993. % + % [Kirkpatrick83]: + % > S. Kirkpatrick, C. D. Jr Gelatt, and M. P. Vecchi. + % > "Optimization by simulated annealing". Science, + % > 220(4598):671-680, 1983. + % % See also: cv.ANN_MLP.TrainMethod % ANN_MLP_(this.id, 'setTrainMethod', method, varargin{:}); @@ -626,7 +647,10 @@ function setActivationFunction(this, ftype, varargin) % `f(x) = beta * (1-exp(-alpha*x))/(1+exp(-alpha*x))`. See % note below. % * __Gaussian__ Gaussian function: - % `f(x) = beta * exp(-alpha*x*x)` + % `f(x) = beta * exp(-alpha^2*x*x)` + % * __ReLU__ ReLU function: `f(x) = max(0,x)` + % * __LeakyReLU__ Leaky ReLU function: `f(x) = x, for x>0` and + % `f(x) = alpha*x, for x<=0` % % ## Options % * __Param1__ The first parameter of the activation function, @@ -642,6 +666,8 @@ function setActivationFunction(this, ftype, varargin) % the default parameter values `Param1=0` and `Param2=0` then the % function used is `y = 1.7159*tanh(2/3 * x)`, so the output will % range from [-1.7159, 1.7159], instead of [0,1]. + % Recall that by definition + % `tanh(x) = (1 - exp(-2*x)) / (1 + exp(-2*x))`. % % See also: cv.ANN_MLP.ActivationFunction % @@ -724,6 +750,34 @@ function setActivationFunction(this, ftype, varargin) function set.RpropDWMax(this, value) ANN_MLP_(this.id, 'set', 'RpropDWMax', value); end + + function value = get.AnnealInitialT(this) + value = ANN_MLP_(this.id, 'get', 'AnnealInitialT'); + end + function set.AnnealInitialT(this, value) + ANN_MLP_(this.id, 'set', 'AnnealInitialT', value); + end + + function value = get.AnnealFinalT(this) + value = ANN_MLP_(this.id, 'get', 'AnnealFinalT'); + end + function set.AnnealFinalT(this, value) + ANN_MLP_(this.id, 'set', 'AnnealFinalT', value); + end + + function value = get.AnnealCoolingRatio(this) + value = ANN_MLP_(this.id, 'get', 'AnnealCoolingRatio'); + end + function set.AnnealCoolingRatio(this, value) + ANN_MLP_(this.id, 'set', 'AnnealCoolingRatio', value); + end + + function value = get.AnnealItePerStep(this) + value = ANN_MLP_(this.id, 'get', 'AnnealItePerStep'); + end + function set.AnnealItePerStep(this, value) + ANN_MLP_(this.id, 'set', 'AnnealItePerStep', value); + end end end diff --git a/src/+cv/private/ANN_MLP_.cpp b/src/+cv/private/ANN_MLP_.cpp index 494861859..fc91ffc20 100644 --- a/src/+cv/private/ANN_MLP_.cpp +++ b/src/+cv/private/ANN_MLP_.cpp @@ -21,24 +21,30 @@ map > obj_; /// Option values for ANN_MLP train types const ConstMap ANN_MLPTrain = ConstMap ("Backprop", cv::ml::ANN_MLP::BACKPROP) - ("RProp", cv::ml::ANN_MLP::RPROP); + ("RProp", cv::ml::ANN_MLP::RPROP) + ("Anneal", cv::ml::ANN_MLP::ANNEAL); /// Inverse option values for ANN_MLP train types const ConstMap InvANN_MLPTrain = ConstMap (cv::ml::ANN_MLP::BACKPROP, "Backprop") - (cv::ml::ANN_MLP::RPROP, "RProp"); + (cv::ml::ANN_MLP::RPROP, "RProp") + (cv::ml::ANN_MLP::ANNEAL, "Anneal"); /// Option values for ANN_MLP activation function const ConstMap ActivateFunc = ConstMap - ("Identity", 
cv::ml::ANN_MLP::IDENTITY) - ("Sigmoid", cv::ml::ANN_MLP::SIGMOID_SYM) - ("Gaussian", cv::ml::ANN_MLP::GAUSSIAN); + ("Identity", cv::ml::ANN_MLP::IDENTITY) + ("Sigmoid", cv::ml::ANN_MLP::SIGMOID_SYM) + ("Gaussian", cv::ml::ANN_MLP::GAUSSIAN) + ("ReLU", cv::ml::ANN_MLP::RELU) + ("LeakyReLU", cv::ml::ANN_MLP::LEAKYRELU); /// Inverse option values for ANN_MLP activation function const ConstMap InvActivateFunc = ConstMap (cv::ml::ANN_MLP::IDENTITY, "Identity") (cv::ml::ANN_MLP::SIGMOID_SYM, "Sigmoid") - (cv::ml::ANN_MLP::GAUSSIAN, "Gaussian"); + (cv::ml::ANN_MLP::GAUSSIAN, "Gaussian") + (cv::ml::ANN_MLP::RELU, "ReLU") + (cv::ml::ANN_MLP::LEAKYRELU, "LeakyReLU"); } /** @@ -246,26 +252,34 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) else if (method == "get") { nargchk(nrhs==3 && nlhs<=1); string prop(rhs[2].toString()); - if (prop == "BackpropMomentumScale") - plhs[0] = MxArray(obj->getBackpropMomentumScale()); - else if (prop == "BackpropWeightScale") - plhs[0] = MxArray(obj->getBackpropWeightScale()); + if (prop == "TrainMethod") + plhs[0] = MxArray(InvANN_MLPTrain[obj->getTrainMethod()]); else if (prop == "LayerSizes") plhs[0] = MxArray(obj->getLayerSizes()); + else if (prop == "TermCriteria") + plhs[0] = MxArray(obj->getTermCriteria()); + else if (prop == "BackpropWeightScale") + plhs[0] = MxArray(obj->getBackpropWeightScale()); + else if (prop == "BackpropMomentumScale") + plhs[0] = MxArray(obj->getBackpropMomentumScale()); else if (prop == "RpropDW0") plhs[0] = MxArray(obj->getRpropDW0()); - else if (prop == "RpropDWMax") - plhs[0] = MxArray(obj->getRpropDWMax()); - else if (prop == "RpropDWMin") - plhs[0] = MxArray(obj->getRpropDWMin()); - else if (prop == "RpropDWMinus") - plhs[0] = MxArray(obj->getRpropDWMinus()); else if (prop == "RpropDWPlus") plhs[0] = MxArray(obj->getRpropDWPlus()); - else if (prop == "TermCriteria") - plhs[0] = MxArray(obj->getTermCriteria()); - else if (prop == "TrainMethod") - plhs[0] = MxArray(InvANN_MLPTrain[obj->getTrainMethod()]); + else if (prop == "RpropDWMinus") + plhs[0] = MxArray(obj->getRpropDWMinus()); + else if (prop == "RpropDWMin") + plhs[0] = MxArray(obj->getRpropDWMin()); + else if (prop == "RpropDWMax") + plhs[0] = MxArray(obj->getRpropDWMax()); + else if (prop == "AnnealInitialT") + plhs[0] = MxArray(obj->getAnnealInitialT()); + else if (prop == "AnnealFinalT") + plhs[0] = MxArray(obj->getAnnealFinalT()); + else if (prop == "AnnealCoolingRatio") + plhs[0] = MxArray(obj->getAnnealCoolingRatio()); + else if (prop == "AnnealItePerStep") + plhs[0] = MxArray(obj->getAnnealItePerStep()); else mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized property %s", prop.c_str()); @@ -273,28 +287,36 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) else if (method == "set") { nargchk(nrhs==4 && nlhs==0); string prop(rhs[2].toString()); - if (prop == "BackpropMomentumScale") - obj->setBackpropMomentumScale(rhs[3].toDouble()); - else if (prop == "BackpropWeightScale") - obj->setBackpropWeightScale(rhs[3].toDouble()); + if (prop == "TrainMethod") + obj->setTrainMethod(ANN_MLPTrain[rhs[3].toString()]); + else if (prop == "ActivationFunction") + obj->setActivationFunction(ActivateFunc[rhs[3].toString()]); else if (prop == "LayerSizes") obj->setLayerSizes(rhs[3].toMat()); + else if (prop == "TermCriteria") + obj->setTermCriteria(rhs[3].toTermCriteria()); + else if (prop == "BackpropWeightScale") + obj->setBackpropWeightScale(rhs[3].toDouble()); + else if (prop == "BackpropMomentumScale") + 
obj->setBackpropMomentumScale(rhs[3].toDouble()); else if (prop == "RpropDW0") obj->setRpropDW0(rhs[3].toDouble()); - else if (prop == "RpropDWMax") - obj->setRpropDWMax(rhs[3].toDouble()); - else if (prop == "RpropDWMin") - obj->setRpropDWMin(rhs[3].toDouble()); - else if (prop == "RpropDWMinus") - obj->setRpropDWMinus(rhs[3].toDouble()); else if (prop == "RpropDWPlus") obj->setRpropDWPlus(rhs[3].toDouble()); - else if (prop == "TermCriteria") - obj->setTermCriteria(rhs[3].toTermCriteria()); - else if (prop == "TrainMethod") - obj->setTrainMethod(ANN_MLPTrain[rhs[3].toString()]); - else if (prop == "ActivationFunction") - obj->setActivationFunction(ActivateFunc[rhs[3].toString()]); + else if (prop == "RpropDWMinus") + obj->setRpropDWMinus(rhs[3].toDouble()); + else if (prop == "RpropDWMin") + obj->setRpropDWMin(rhs[3].toDouble()); + else if (prop == "RpropDWMax") + obj->setRpropDWMax(rhs[3].toDouble()); + else if (prop == "AnnealInitialT") + obj->setAnnealInitialT(rhs[3].toDouble()); + else if (prop == "AnnealFinalT") + obj->setAnnealFinalT(rhs[3].toDouble()); + else if (prop == "AnnealCoolingRatio") + obj->setAnnealCoolingRatio(rhs[3].toDouble()); + else if (prop == "AnnealItePerStep") + obj->setAnnealItePerStep(rhs[3].toInt()); else mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized property %s", prop.c_str()); From 71db979064c8c2026bab62249834e8cd8bc4dd4e Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 00:30:24 +0200 Subject: [PATCH 13/36] dnn: update Net class - new option in shrinkCaffeModel method - new function NMSBoxes - rename forwardAll to forwardAndRetrieve - drop forwardOpt method (was never implemented) --- +cv/Net.m | 101 +++++++++++++++++++++----------------- src/+cv/private/Net_.cpp | 50 ++++++++++++++----- test/unit_tests/TestNet.m | 2 +- 3 files changed, 95 insertions(+), 58 deletions(-) diff --git a/+cv/Net.m b/+cv/Net.m index d2204c37c..82c4b7c51 100644 --- a/+cv/Net.m +++ b/+cv/Net.m @@ -14,7 +14,7 @@ % computations (i. e. network testing). A network training is in principle % not supported. % - % https://github.com/opencv/opencv/wiki/Deep-Learning-in-OpenCV + % [Wiki](https://github.com/opencv/opencv/wiki/Deep-Learning-in-OpenCV) % % ## Net class % Neural network is presented as directed acyclic graph (DAG), where @@ -44,7 +44,7 @@ % net = cv.Net('Caffe', prototxt) % net = cv.Net('Caffe', prototxt, caffeModel) % - % net = cv.Net('Tensorflow', model) + % net = cv.Net('Tensorflow', modelmodel) % net = cv.Net('Tensorflow', model, config) % % net = cv.Net('Torch', filename) @@ -61,11 +61,12 @@ % * __model__ path to the `.pb` file with binary protobuf % description of the network architecture. Binary serialized % TensorFlow graph includes weights. - % * __config__ Optional path to the `.pbtxt` file with text - % definition of TensorFlow graph. More flexible than binary - % format and may be used to build the network using binary - % format only as a weights storage. This approach is similar to - % Caffe's `.prorotxt` and `.caffemodel`. + % * __config__ Optional path to the `.pbtxt` file that contains + % text graph definition in protobuf format. Resulting net is + % built by text graph using weights from a binary one. This is + % more flexible than binary format and may be used to build the + % network using binary format only as a weights storage. This + % approach is similar to Caffe's `.prorotxt` and `.caffemodel`. % * __filename__ path to the file, dumped from Torch by using % `torch.save()` function. 
% * __isBinary__ specifies whether the network was serialized in @@ -78,13 +79,13 @@ % The first variant creates an empty network. % % The second variant reads a network model stored in - % [Caffe](http://caffe.berkeleyvision.org) model files. + % [Caffe](http://caffe.berkeleyvision.org) framework's format. % - % The third variant is an importer of - % [TensorFlow](https://www.tensorflow.org) framework network. + % The third variant reads a network model stored in + % [TensorFlow](https://www.tensorflow.org/) framework's format. % - % The fourth variant is an importer of [Torch7](http://torch.ch) - % framework network. + % The fourth variant reads a network model stored in + % [Torch7](http://torch.ch) framework's format. % % The fifth variant reads a network model stored in % [Darknet](https://pjreddie.com/darknet/) model files. @@ -94,9 +95,6 @@ % % ### Notes for Torch % - % Warning: Torch7 importer is experimental now, you need - % explicitly set CMake flag to compile it. - % % NOTE: ASCII mode of Torch serializer is more preferable, because % binary mode extensively use `long` type of C language, which has % various bit-length on different systems. @@ -240,18 +238,18 @@ function setParam(this, layerId, numParam, blob) % listed in `outBlobNames`. It returns blobs for first outputs of % specified layers. % - % See also: cv.Net.forwardAll, cv.Net.Net + % See also: cv.Net.forwardAndRetrieve, cv.Net.Net % blob = Net_(this.id, 'forward', varargin{:}); end - function blobs = forwardAll(this, varargin) - %FORWARDALL Runs forward pass + function blobs = forwardAndRetrieve(this, varargin) + %FORWARDANDRETRIEVE Runs forward pass % - % blobs = net.forwardAll() - % blobs = net.forwardAll(outputName) + % blobs = net.forwardAndRetrieve() + % blobs = net.forwardAndRetrieve(outputName) % - % blobsArr = net.forwardAll(outBlobNames) + % blobsArr = net.forwardAndRetrieve(outBlobNames) % % ## Input % * __outputName__ name for layer which output is needed to get. @@ -276,25 +274,7 @@ function setParam(this, layerId, numParam, blob) % % See also: cv.Net.forward, cv.Net.Net % - blobs = Net_(this.id, 'forwardAll', varargin{:}); - end - - function forwardOpt(this, toLayerId) - %FORWARDOPT Optimized forward - % - % net.forwardOpt(toLayerId) - % - % ## Input - % * __toLayerId__ layer name or layer id (one or several). - % - % Makes forward only those layers which weren't changed after - % previous cv.Net.forward. - % - % Warning: Not yet implemented. - % - % See also: cv.Net.forward - % - Net_(this.id, 'forwardOpt', toLayerId); + blobs = Net_(this.id, 'forwardAndRetrieve', varargin{:}); end function [timings, total] = getPerfProfile(this) @@ -682,8 +662,8 @@ function setPreferableTarget(this, target) % blob = cv.Net.blobFromImages(..., 'OptionName',optionValue, ...) % % ## Input - % * __img__ input image (with 1- or 3-channels). - % * __imgs__ input images (all with 1- or 3-channels). + % * __img__ input image (with 1-, 3- or 4-channels). + % * __imgs__ input images (all with 1-, 3- or 4-channels). % % ## Output % * __blob__ 4-dimansional array with NCHW dimensions order. @@ -721,10 +701,11 @@ function setPreferableTarget(this, target) blob = Net_(0, 'blobFromImages', img, varargin{:}); end - function shrinkCaffeModel(src, dst) + function shrinkCaffeModel(src, dst, varargin) %SHRINKCAFFEMODEL Convert all weights of Caffe network to half precision floating point % % cv.Net.shrinkCaffeModel(src, dst) + % cv.Net.shrinkCaffeModel(..., 'OptionName',optionValue, ...) 
% % ## Input % * __src__ Path to origin model from Caffe framework contains @@ -732,13 +713,45 @@ function shrinkCaffeModel(src, dst) % `.caffemodel` extension). % * __dst__ Path to destination model with updated weights. % + % ## Options + % * __LayersTypes__ Set of layers types which parameters will be + % converted. By default (not set), converts only Convolutional + % and Fully-Connected layers' weights, + % i.e `{'Convolution', 'InnerProduct'}`. + % % Note: Shrinked model has no origin `float32` weights so it can't % be used in origin Caffe framework anymore. However the structure % of data is taken from NVidia's % . So the resulting % model may be used there. % - Net_(0, 'shrinkCaffeModel', src, dst); + Net_(0, 'shrinkCaffeModel', src, dst, varargin{:}); + end + + function indices = NMSBoxes(bboxes, scores, score_threshold, nms_threshold, varargin) + %NMSBOXES Performs non-maximum suppression given boxes and corresponding scores + % + % indices = cv.Net.NMSBoxes(bboxes, scores, score_threshold, nms_threshold) + % indices = cv.Net.NMSBoxes(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __bboxes__ a set of bounding boxes to apply NMS. + % * __scores__ a set of corresponding confidences. + % * **score_threshold** a threshold used to filter boxes by score. + % * **nms_threshold** a threshold used in non maximum suppression. + % + % ## Output + % * __indices__ the kept indices of bboxes after NMS. + % + % ## Options + % * __Eta__ a coefficient in adaptive threshold formula: + % `nms_threshold_{i+1} = eta * nms_threshold_{i}`. default 1.0 + % * __TopK__ if `> 0`, keep at most `TopK` picked indices. + % default 0 + % + % See also: cv.groupRectangles + % + indices = Net_(0, 'NMSBoxes', bboxes, scores, score_threshold, nms_threshold, varargin{:}); end end end diff --git a/src/+cv/private/Net_.cpp b/src/+cv/private/Net_.cpp index 282ab7445..83a24c4e6 100644 --- a/src/+cv/private/Net_.cpp +++ b/src/+cv/private/Net_.cpp @@ -336,10 +336,44 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) return; } else if (method == "shrinkCaffeModel") { - nargchk(nrhs==4 && nlhs==0); + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs==0); + vector layersTypes; + for (int i=4; i(); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized option %s", key.c_str()); + } string src(rhs[2].toString()), dst(rhs[3].toString()); - shrinkCaffeModel(src, dst); + shrinkCaffeModel(src, dst, + vector(layersTypes.begin(), layersTypes.end())); + return; + } + else if (method == "NMSBoxes") { + nargchk(nrhs>=6 && (nrhs%2)==0 && nlhs<=1); + float eta = 1.0f; + int top_k = 0; + for (int i=6; i bboxes(rhs[2].toVector()); + vector scores(rhs[3].toVector()); + float score_threshold = rhs[4].toFloat(); + float nms_threshold = rhs[5].toFloat(); + vector indices; + NMSBoxes(bboxes, scores, score_threshold, nms_threshold, indices, + eta, top_k); + plhs[0] = MxArray(indices); return; } @@ -434,7 +468,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) plhs[0] = MxArray(outputBlobs); } } - else if (method == "forwardAll") { + else if (method == "forwardAndRetrieve") { nargchk((nrhs==2 || nrhs==3) && nlhs<=1); if (nrhs == 2 || rhs[2].isChar()) { string outputName; @@ -452,16 +486,6 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) plhs[0] = MxArray(outputBlobs); } } - /* - //TODO: linking error; unresolved external symbol - else if (method == "forwardOpt") { - nargchk(nrhs==3 && nlhs==0); - if (rhs[2].numel() == 1) - 
obj->forwardOpt(MxArrayToLayerId(rhs[2])); - else - obj->forwardOpt(MxArrayToVectorLayerId(rhs[2])); - } - */ else if (method == "setHalideScheduler") { nargchk(nrhs==3 && nlhs==0); obj->setHalideScheduler(rhs[2].toString()); diff --git a/test/unit_tests/TestNet.m b/test/unit_tests/TestNet.m index 2abadb372..5fda10b61 100644 --- a/test/unit_tests/TestNet.m +++ b/test/unit_tests/TestNet.m @@ -48,7 +48,7 @@ blob = cv.Net.blobFromImages({img1,img2}, 'Size',[224 224]); net.setInput(blob, 'data'); - blobs = net.forwardAll('conv1/7x7_s2'); + blobs = net.forwardAndRetrieve('conv1/7x7_s2'); end function test_shrink_caffe_fp16 From 23e422e4952312c23b6996816a1964294e5b8ca8 Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 15:14:01 +0200 Subject: [PATCH 14/36] datasets: update in split function make sure string is not empty before indexing, otherwise use space as default value --- opencv_contrib/src/+cv/private/Dataset_.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencv_contrib/src/+cv/private/Dataset_.cpp b/opencv_contrib/src/+cv/private/Dataset_.cpp index 799d47510..a8edec64c 100644 --- a/opencv_contrib/src/+cv/private/Dataset_.cpp +++ b/opencv_contrib/src/+cv/private/Dataset_.cpp @@ -918,7 +918,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) else if (method == "split") { nargchk(nrhs==5 && nlhs<=1); string s(rhs[3].toString()); - char delim = rhs[4].toString()[0]; + char delim = (!rhs[4].isEmpty()) ? rhs[4].toString()[0] : ' '; vector elems; cv::datasets::split(s, elems, delim); plhs[0] = MxArray(elems); From 624b18b33fc5447ebec274744fd0aaf9bda65d2c Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 14:22:30 +0200 Subject: [PATCH 15/36] ximgproc: DisparityWLSFilter input disparity depth accepts either CV_16S or CV_32F --- opencv_contrib/+cv/DisparityWLSFilter.m | 20 +++++++++---------- .../src/+cv/private/DisparityWLSFilter_.cpp | 15 +++++++------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/opencv_contrib/+cv/DisparityWLSFilter.m b/opencv_contrib/+cv/DisparityWLSFilter.m index 03ade6dc9..756821fd4 100644 --- a/opencv_contrib/+cv/DisparityWLSFilter.m +++ b/opencv_contrib/+cv/DisparityWLSFilter.m @@ -190,11 +190,11 @@ function load(this, fname_or_str, varargin) % % ## Input % * **disparity_map_left** disparity map of the left view, - % 1-channel `int16` type. Implicitly assumes that disparity - % values are scaled by 16 (one-pixel disparity corresponds to - % the value of 16 in the disparity map). Disparity map can have - % any resolution, it will be automatically resized to fit - % `left_view` resolution. + % 1-channel `int16` or `single` type. Implicitly assumes that + % disparity values are scaled by 16 (one-pixel disparity + % corresponds to the value of 16 in the disparity map). Disparity + % map can have any resolution, it will be automatically resized + % to fit `left_view` resolution. % * **disparity_map_right** optional argument, some % implementations might also use the disparity map of the right % view to compute confidence maps, for instance. Pass an empty @@ -337,8 +337,8 @@ function load(this, fname_or_str, varargin) % mse = cv.DisparityWLSFilter.computeMSE(..., 'OptionName',optionValue, ...) % % ## Input - % * __GT__ ground truth disparity map (`int16`). - % * __src__ disparity map to evaluate (`int16`). + % * __GT__ ground truth disparity map (`int16` or `single`). + % * __src__ disparity map to evaluate (`int16` or `single`). 
% % ## Output % * __mse__ returns mean square error between `GT` and `src`. @@ -358,8 +358,8 @@ function load(this, fname_or_str, varargin) % prcnt = cv.DisparityWLSFilter.computeBadPixelPercent(..., 'OptionName',optionValue, ...) % % ## Input - % * __GT__ ground truth disparity map (`int16`). - % * __src__ disparity map to evaluate (`int16`). + % * __GT__ ground truth disparity map (`int16` or `single`). + % * __src__ disparity map to evaluate (`int16` or `single`). % % ## Output % * __prcnt__ returns percent of "bad" pixels between `GT` and @@ -383,7 +383,7 @@ function load(this, fname_or_str, varargin) % dst = cv.DisparityWLSFilter.getDisparityVis(src, 'OptionName',optionValue, ...) % % ## Input - % * __src__ input disparity map (`int16` depth). + % * __src__ input disparity map (`int16` or `single` depth). % % ## Output % * __dst__ output visualization (clamped `uint8` image). diff --git a/opencv_contrib/src/+cv/private/DisparityWLSFilter_.cpp b/opencv_contrib/src/+cv/private/DisparityWLSFilter_.cpp index 82c401354..5b2f1d025 100644 --- a/opencv_contrib/src/+cv/private/DisparityWLSFilter_.cpp +++ b/opencv_contrib/src/+cv/private/DisparityWLSFilter_.cpp @@ -304,8 +304,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } - Mat GT(rhs[2].toMat(CV_16S)), - src(rhs[3].toMat(CV_16S)); + Mat GT(rhs[2].toMat(rhs[2].isInt16() ? CV_16S : CV_32F)), + src(rhs[3].toMat(rhs[3].isInt16() ? CV_16S : CV_32F)); if (ROI.area() == 0) ROI = Rect(0, 0, src.cols, src.rows); double mse = computeMSE(GT, src, ROI); @@ -326,8 +326,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } - Mat GT(rhs[2].toMat(CV_16S)), - src(rhs[3].toMat(CV_16S)); + Mat GT(rhs[2].toMat(rhs[2].isInt16() ? CV_16S : CV_32F)), + src(rhs[3].toMat(rhs[3].isInt16() ? CV_16S : CV_32F)); if (ROI.area() == 0) ROI = Rect(0, 0, src.cols, src.rows); double prcnt = computeBadPixelPercent(GT, src, ROI, thresh); @@ -345,7 +345,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } - Mat src(rhs[2].toMat(CV_16S)), dst; + Mat src(rhs[2].toMat(rhs[2].isInt16() ? CV_16S : CV_32F)), + dst; getDisparityVis(src, dst, scale); plhs[0] = MxArray(dst); return; @@ -421,8 +422,8 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } - Mat disparity_map_left(rhs[2].toMat(CV_16S)), - disparity_map_right(rhs[3].toMat(CV_16S)), + Mat disparity_map_left(rhs[2].toMat(rhs[2].isInt16() ? CV_16S : CV_32F)), + disparity_map_right(rhs[3].toMat(rhs[3].isInt16() ? 
CV_16S : CV_32F)), left_view(rhs[4].toMat(CV_8U)), filtered_disparity_map; obj->filter(disparity_map_left, left_view, filtered_disparity_map, From c2bd912a6cf8a9535220cf8c7ded47573fd363bc Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 14:33:31 +0200 Subject: [PATCH 16/36] xfeatures2d: new Sigma option for LATCH controls Gaussian blur --- opencv_contrib/+cv/LATCH.m | 6 +++++- src/mexopencv_features2d.cpp | 5 ++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/opencv_contrib/+cv/LATCH.m b/opencv_contrib/+cv/LATCH.m index 5717f7ce5..2271642a2 100644 --- a/opencv_contrib/+cv/LATCH.m +++ b/opencv_contrib/+cv/LATCH.m @@ -39,6 +39,9 @@ % example, if we would like to compare triplets of patches of % size 7x7x then the `half_ssd_size` should be `(7-1)/2 = 3`. % default 3 + % * __Sigma__ sigma value for cv.GaussianBlur smoothing of the + % source image. Source image will be used without smoothing in + % case sigma value is 0. default 2.0 % % See also: cv.LATCH.compute % @@ -212,7 +215,8 @@ function load(this, fname_or_str, varargin) % [descriptors, keypoints] = obj.compute(imgs, keypoints) % % ## Input - % * __img__ Image (first variant), 8-bit grayscale image. + % * __img__ Image (first variant), 8-bit grayscale image + % (color images are converted as such). % * __imgs__ Image set (second variant), cell array of images. % * __keypoints__ Input collection of keypoints. Keypoints for % which a descriptor cannot be computed are removed. Sometimes diff --git a/src/mexopencv_features2d.cpp b/src/mexopencv_features2d.cpp index f51a0f2b5..5fd08403c 100644 --- a/src/mexopencv_features2d.cpp +++ b/src/mexopencv_features2d.cpp @@ -547,6 +547,7 @@ Ptr createLATCH( int bytes = 32; bool rotationInvariance = true; int half_ssd_size = 3; + double sigma = 2.0; for (; first != last; first += 2) { string key(first->toString()); const MxArray& val = *(first + 1); @@ -556,11 +557,13 @@ Ptr createLATCH( rotationInvariance = val.toBool(); else if (key == "HalfSize") half_ssd_size = val.toInt(); + else if (key == "Sigma") + sigma = val.toDouble(); else mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized option %s", key.c_str()); } - return LATCH::create(bytes, rotationInvariance, half_ssd_size); + return LATCH::create(bytes, rotationInvariance, half_ssd_size, sigma); } Ptr createDAISY( From 4572fd0d2a04f38a78a43c8a2483c5231da02f45 Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 15:11:57 +0200 Subject: [PATCH 17/36] xfeatures2d: opencv bug was fixed in FASTForPointSet --- opencv_contrib/src/+cv/FASTForPointSet.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/opencv_contrib/src/+cv/FASTForPointSet.cpp b/opencv_contrib/src/+cv/FASTForPointSet.cpp index 4b33aef96..eec7a14e9 100644 --- a/opencv_contrib/src/+cv/FASTForPointSet.cpp +++ b/opencv_contrib/src/+cv/FASTForPointSet.cpp @@ -11,9 +11,6 @@ using namespace std; using namespace cv; using namespace cv::xfeatures2d; -//TODO: due to a bug in opencv, function always returns empty keypoints -// https://github.com/opencv/opencv_contrib/pull/1435 - namespace { /// FAST neighborhood types const ConstMap FASTTypeMap = ConstMap From c241a80bcb4b2cb0a37d013ebce83854a1ab68ff Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 14:47:28 +0200 Subject: [PATCH 18/36] img_hash: updates to ImgHash class - input images expected to be 8-bit - algorithm name ("klass" member) only needed in ctor call --- opencv_contrib/+cv/ImgHash.m | 27 +++++----- opencv_contrib/src/+cv/private/ImgHash_.cpp | 55 +++++++++++---------- 2 files changed, 42 
insertions(+), 40 deletions(-) diff --git a/opencv_contrib/+cv/ImgHash.m b/opencv_contrib/+cv/ImgHash.m index 55ccccca5..dc8bdb771 100644 --- a/opencv_contrib/+cv/ImgHash.m +++ b/opencv_contrib/+cv/ImgHash.m @@ -73,8 +73,6 @@ properties (SetAccess = private) % Object ID id - % Object class - klass end %% Constructor/destructor @@ -121,8 +119,7 @@ % % See also: cv.ImgHash.compute, cv.ImgHash.compare % - this.klass = alg; - this.id = ImgHash_(0, 'new', this.klass, varargin{:}); + this.id = ImgHash_(0, 'new', alg, varargin{:}); end function delete(this) @@ -133,7 +130,7 @@ function delete(this) % See also: cv.ImgHash % if isempty(this.id), return; end - ImgHash_(this.id, 'delete', this.klass); + ImgHash_(this.id, 'delete'); end function typename = typeid(this) @@ -144,7 +141,7 @@ function delete(this) % ## Output % * __typename__ Name of C++ type % - typename = ImgHash_(this.id, 'typeid', this.klass); + typename = ImgHash_(this.id, 'typeid'); end end @@ -156,14 +153,14 @@ function delete(this) % hash = obj.compute(img) % % ## Input - % * __img__ input image want to compute hash value. + % * __img__ input image wanting to compute its hash value. % % ## Output % * __hash__ hash of the image. % % See also: cv.ImgHash.compare % - hash = ImgHash_(this.id, 'compute', this.klass, img); + hash = ImgHash_(this.id, 'compute', img); end function val = compare(this, hashOne, hashTwo) @@ -181,7 +178,7 @@ function delete(this) % % See also: cv.ImgHash.compute % - val = ImgHash_(this.id, 'compare', this.klass, hashOne, hashTwo); + val = ImgHash_(this.id, 'compare', hashOne, hashTwo); end end @@ -202,7 +199,7 @@ function delete(this) % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % - hash = ImgHash_(0, 'averageHash', '', img); + hash = ImgHash_(0, 'averageHash', img); end function hash = blockMeanHash(img, varargin) @@ -228,7 +225,7 @@ function delete(this) % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % - hash = ImgHash_(0, 'blockMeanHash', '', img, varargin{:}); + hash = ImgHash_(0, 'blockMeanHash', img, varargin{:}); end function hash = colorMomentHash(img) @@ -247,7 +244,7 @@ function delete(this) % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % - hash = ImgHash_(0, 'colorMomentHash', '', img); + hash = ImgHash_(0, 'colorMomentHash', img); end function hash = marrHildrethHash(img, varargin) @@ -270,7 +267,7 @@ function delete(this) % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % - hash = ImgHash_(0, 'marrHildrethHash', '', img, varargin{:}); + hash = ImgHash_(0, 'marrHildrethHash', img, varargin{:}); end function hash = pHash(img) @@ -287,7 +284,7 @@ function delete(this) % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % - hash = ImgHash_(0, 'pHash', '', img); + hash = ImgHash_(0, 'pHash', img); end function hash = radialVarianceHash(img, varargin) @@ -310,7 +307,7 @@ function delete(this) % % See also: cv.ImgHash.ImgHash, cv.ImgHash.compute % - hash = ImgHash_(0, 'radialVarianceHash', '', img, varargin{:}); + hash = ImgHash_(0, 'radialVarianceHash', img, varargin{:}); end end diff --git a/opencv_contrib/src/+cv/private/ImgHash_.cpp b/opencv_contrib/src/+cv/private/ImgHash_.cpp index 0b5173014..18e3ee80b 100644 --- a/opencv_contrib/src/+cv/private/ImgHash_.cpp +++ b/opencv_contrib/src/+cv/private/ImgHash_.cpp @@ -122,34 +122,34 @@ Ptr createImgHashBase( void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Check the number of arguments - nargchk(nrhs>=3 && nlhs<=1); + nargchk(nrhs>=2 && nlhs<=1); // Argument vector vector rhs(prhs, prhs+nrhs); 
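A hedged usage sketch of the updated ImgHash calling convention; the 'PHash' constructor string and the blur step are assumptions for illustration, only the dropped placeholder argument is taken from the changes above:
img = imread(fullfile(mexopencv.root(), 'test', 'lena.jpg'));   % 8-bit input expected
obj = cv.ImgHash('PHash');               % algorithm chosen once, in the constructor
h1 = obj.compute(img);
h2 = obj.compute(cv.GaussianBlur(img, 'KSize',[7 7]));
d = obj.compare(h1, h2);                 % small distance: perceptually similar images
% static helpers now take the image directly, without a placeholder first argument:
h3 = cv.ImgHash.pHash(img);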
int id = rhs[0].toInt(); string method(rhs[1].toString()); - string klass(rhs[2].toString()); // Constructor is called. Create a new object from argument if (method == "new") { nargchk(nrhs>=3 && nlhs<=1); - obj_[++last_id] = createImgHashBase(klass, rhs.begin() + 3, rhs.end()); + obj_[++last_id] = createImgHashBase( + rhs[2].toString(), rhs.begin() + 3, rhs.end()); plhs[0] = MxArray(last_id); mexLock(); return; } // static methods calls else if (method == "averageHash") { - nargchk(nrhs==4 && nlhs<=1); - Mat img(rhs[3].toMat()), hash; + nargchk(nrhs==3 && nlhs<=1); + Mat img(rhs[2].toMat(CV_8U)), hash; averageHash(img, hash); plhs[0] = MxArray(hash); return; } else if (method == "blockMeanHash") { - nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=1); + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=1); int mode = BLOCK_MEAN_HASH_MODE_0; - for (int i=4; i=4 && (nrhs%2)==0 && nlhs<=1); + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=1); float alpha = 2.0f; float scale = 1.0f; - for (int i=4; i=4 && (nrhs%2)==0 && nlhs<=1); + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=1); double sigma = 1; int numOfAngleLine = 180; - for (int i=4; icompute(img, hash); plhs[0] = MxArray(hash); } else if (method == "compare") { - nargchk(nrhs==5 && nlhs<=1); - Mat hashOne(rhs[3].toMat()), - hashTwo(rhs[4].toMat()); + nargchk(nrhs==4 && nlhs<=1); + Mat hashOne(rhs[2].toMat()), + hashTwo(rhs[3].toMat()); // hashes CV_8U or CV_64F double val = obj->compare(hashOne, hashTwo); plhs[0] = MxArray(val); } + //TODO: expose derived-class specific methods: + // BlockMeanHash::getMean + // RadialVarianceHash::getFeatures + // RadialVarianceHash::getPixPerLine + // RadialVarianceHash::getProjection else mexErrMsgIdAndTxt("mexopencv:error", "Unrecognized operation %s",method.c_str()); From be1f0a9b68be26fbe259a8e40d8ea596601ca321 Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 16:36:03 +0200 Subject: [PATCH 19/36] bgsegm: two new algorithms + one util class - new LSBP and GSOC classes - new synthetic sequence generator class - new demo + update existing demo --- opencv_contrib/+cv/BackgroundSubtractorGSOC.m | 194 +++++++++++++++ opencv_contrib/+cv/BackgroundSubtractorLSBP.m | 225 ++++++++++++++++++ .../+cv/SyntheticSequenceGenerator.m | 156 ++++++++++++ .../samples/BackgroundSubtractorDemo.m | 21 +- .../samples/bgsegm_synthetic_seq_demo.m | 107 +++++++++ .../+cv/private/BackgroundSubtractorGSOC_.cpp | 176 ++++++++++++++ .../+cv/private/BackgroundSubtractorLSBP_.cpp | 194 +++++++++++++++ .../private/SyntheticSequenceGenerator_.cpp | 136 +++++++++++ .../unit_tests/TestBackgroundSubtractorGSOC.m | 24 ++ .../unit_tests/TestBackgroundSubtractorLSBP.m | 37 +++ .../TestSyntheticSequenceGenerator.m | 17 ++ 11 files changed, 1279 insertions(+), 8 deletions(-) create mode 100644 opencv_contrib/+cv/BackgroundSubtractorGSOC.m create mode 100644 opencv_contrib/+cv/BackgroundSubtractorLSBP.m create mode 100644 opencv_contrib/+cv/SyntheticSequenceGenerator.m create mode 100644 opencv_contrib/samples/bgsegm_synthetic_seq_demo.m create mode 100644 opencv_contrib/src/+cv/private/BackgroundSubtractorGSOC_.cpp create mode 100644 opencv_contrib/src/+cv/private/BackgroundSubtractorLSBP_.cpp create mode 100644 opencv_contrib/src/+cv/private/SyntheticSequenceGenerator_.cpp create mode 100644 opencv_contrib/test/unit_tests/TestBackgroundSubtractorGSOC.m create mode 100644 opencv_contrib/test/unit_tests/TestBackgroundSubtractorLSBP.m create mode 100644 opencv_contrib/test/unit_tests/TestSyntheticSequenceGenerator.m diff --git 
a/opencv_contrib/+cv/BackgroundSubtractorGSOC.m b/opencv_contrib/+cv/BackgroundSubtractorGSOC.m new file mode 100644 index 000000000..0bde999dc --- /dev/null +++ b/opencv_contrib/+cv/BackgroundSubtractorGSOC.m @@ -0,0 +1,194 @@ +classdef BackgroundSubtractorGSOC < handle + %BACKGROUNDSUBTRACTORGSOC Background Subtraction implemented during GSOC + % + % Implementation of the different (compared to cv.BackgroundSubtractorLSBP) + % yet better algorithm which is called GSOC, as it was implemented during + % GSOC and was not originated from any paper. + % + % This algorithm demonstrates better performance on CDnet 2014 dataset + % compared to other algorithms in OpenCV. + % + % See also: cv.BackgroundSubtractorGSOC.BackgroundSubtractorGSOC, + % cv.BackgroundSubtractorGSOC.apply, + % cv.BackgroundSubtractorGSOC.getBackgroundImage + % + + properties (SetAccess = private) + % Object ID + id + end + + %% BackgroundSubtractor + methods + function this = BackgroundSubtractorGSOC(varargin) + %BACKGROUNDSUBTRACTORGSOC Creates a GSOC Background Subtractor + % + % bs = cv.BackgroundSubtractorGSOC() + % bs = cv.BackgroundSubtractorGSOC('OptionName', optionValue, ...) + % + % ## Options + % * __MotionCompensation__ Whether to use camera motion + % compensation. One of: + % * __None__ (default) + % * __LK__ + % * __NSamples__ Number of samples to maintain at each point of + % the frame. default 20 + % * __ReplaceRate__ Probability of replacing the old sample, i.e + % how fast the model will update itself. default 0.003 + % * __PropagationRate__ Probability of propagating to neighbors. + % default 0.01 + % * __HitsThreshold__ How many positives the sample must get + % before it will be considered as a possible replacement. + % default 32 + % * __Alpha__ Scale coefficient for threshold. default 0.01 + % * __Beta__ Bias coefficient for threshold. default 0.0022 + % * __BlinkingSupressionDecay__ Blinking supression decay factor. + % default 0.1 + % * __BlinkingSupressionMultiplier__ Blinking supression + % multiplier. default 0.1 + % * __NoiseRemovalThresholdFacBG__ Strength of the noise removal + % for background points. default 0.0004 + % * __NoiseRemovalThresholdFacFG__ Strength of the noise removal + % for foreground points. default 0.0008 + % + % See also: cv.BackgroundSubtractorGSOC + % + this.id = BackgroundSubtractorGSOC_(0, 'new', varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % bs.delete() + % + % See also: cv.BackgroundSubtractorGSOC + % + if isempty(this.id), return; end + BackgroundSubtractorGSOC_(this.id, 'delete'); + end + + function fgmask = apply(this, im, varargin) + %APPLY Updates the background model and computes the foreground mask + % + % fgmask = bs.apply(im) + % fgmask = bs.apply(im, 'OptionName', optionValue, ...) + % + % ## Input + % * __im__ Next video frame. + % + % ## Output + % * __fgmask__ The output foreground mask as an 8-bit binary image + % (0 for background, 255 for foregound). + % + % ## Options + % * __LearningRate__ The value between 0 and 1 that indicates how + % fast the background model is learnt. Negative parameter value + % makes the algorithm to use some automatically chosen learning + % rate. 0 means that the background model is not updated at all, + % 1 means that the background model is completely reinitialized + % from the last frame. 
default -1 + % + % See also: cv.BackgroundSubtractorGSOC.getBackgroundImage + % + fgmask = BackgroundSubtractorGSOC_(this.id, 'apply', im, varargin{:}); + end + + function bgImg = getBackgroundImage(this) + %GETBACKGROUNDIMAGE Computes a background image + % + % bgImg = bs.getBackgroundImage() + % + % ## Output + % * __bgImg__ The output background image. + % + % See also: cv.BackgroundSubtractorGSOC.apply + % + bgImg = BackgroundSubtractorGSOC_(this.id, 'getBackgroundImage'); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.BackgroundSubtractorGSOC.empty + % + BackgroundSubtractorGSOC_(this.id, 'clear'); + end + + function b = empty(this) + %EMPTY Returns true if the algorithm is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the algorithm is empty (e.g. in the very + % beginning or after unsuccessful read). + % + % See also: cv.BackgroundSubtractorGSOC.clear + % + b = BackgroundSubtractorGSOC_(this.id, 'empty'); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. + % + % See also: cv.BackgroundSubtractorGSOC.save, + % cv.BackgroundSubtractorGSOC.load + % + name = BackgroundSubtractorGSOC_(this.id, 'getDefaultName'); + end + + function save(this, filename) + %SAVE Saves the algorithm to a file + % + % obj.save(filename) + % + % ## Input + % * __filename__ Name of the file to save to. + % + % This method stores the algorithm parameters in a file storage. + % + % See also: cv.BackgroundSubtractorGSOC.load + % + BackgroundSubtractorGSOC_(this.id, 'save', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads algorithm from a file or a string + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __fname__ Name of the file to read. + % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __ObjName__ The optional name of the node to read (if empty, + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % This method reads algorithm parameters from a file storage. + % The previous model state is discarded. + % + % See also: cv.BackgroundSubtractorGSOC.save + % + BackgroundSubtractorGSOC_(this.id, 'load', fname_or_str, varargin{:}); + end + end + +end diff --git a/opencv_contrib/+cv/BackgroundSubtractorLSBP.m b/opencv_contrib/+cv/BackgroundSubtractorLSBP.m new file mode 100644 index 000000000..6fd9dd33c --- /dev/null +++ b/opencv_contrib/+cv/BackgroundSubtractorLSBP.m @@ -0,0 +1,225 @@ +classdef BackgroundSubtractorLSBP < handle + %BACKGROUNDSUBTRACTORLSBP Background Subtraction using Local SVD Binary Pattern + % + % More details about the algorithm can be found at [LGuo2016]. + % + % It is based on LSBP feature descriptors and achieves state-of-the-art + % performance on the CDnet 2012 dataset. LSBP descriptors are particularly + % good in regions with illumination variation, noise and shadows. So, this + % algorithm has better performance in this kind of regions. 
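Both new subtractors follow the usual apply/getBackgroundImage pattern; a minimal sketch (the sample video name and frame count are assumed here, not prescribed by the classes):
cap = cv.VideoCapture(fullfile(mexopencv.root(), 'test', '768x576.avi'));
bs = cv.BackgroundSubtractorLSBP();              % or: cv.BackgroundSubtractorGSOC()
for i = 1:100
    frame = cap.read();
    if isempty(frame), break; end
    fgmask = bs.apply(frame, 'LearningRate',-1); % -1 lets the model pick the rate
end
bg = bs.getBackgroundImage();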
+ % + % After extraction of LSBP descriptors, the algorithm processes frames + % pixel-wise (i.e independently). Thus the implementation is parallelized + % and fast enough for real-time processing. + % + % ## References + % [LGuo2016]: + % > L. Guo, D. Xu, and Z. Qiang. "Background Subtraction using Local SVD + % > Binary Pattern". In 2016 IEEE Conference on Computer Vision and + % > Pattern Recognition Workshops (CVPRW), pages 1159-1167, June 2016. + % > [PDF](http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w24/papers/Guo_Background_Subtraction_Using_CVPR_2016_paper.pdf) + % + % See also: cv.BackgroundSubtractorLSBP.BackgroundSubtractorLSBP, + % cv.BackgroundSubtractorLSBP.apply, + % cv.BackgroundSubtractorLSBP.getBackgroundImage + % + + properties (SetAccess = private) + % Object ID + id + end + + %% BackgroundSubtractor + methods + function this = BackgroundSubtractorLSBP(varargin) + %BACKGROUNDSUBTRACTORLSBP Creates a LSBP Background Subtractor + % + % bs = cv.BackgroundSubtractorLSBP() + % bs = cv.BackgroundSubtractorLSBP('OptionName', optionValue, ...) + % + % ## Options + % * __MotionCompensation__ Whether to use camera motion + % compensation. One of: + % * __None__ (default) + % * __LK__ + % * __NSamples__ Number of samples to maintain at each point of + % the frame. default 20 + % * __LSBPRadius__ LSBP descriptor radius. default 16 + % * __TLower__ Lower bound for T-values. See [LGuo2016] for + % details. default 2.0 + % * __TUpper__ Upper bound for T-values. See [LGuo2016] for + % details. default 32.0 + % * __TInc__ Increase step for T-values. See [LGuo2016] for + % details. default 1.0 + % * __TDec__ Decrease step for T-values. See [LGuo2016] for + % details. default 0.05 + % * __RScale__ Scale coefficient for threshold values. default 10.0 + % * __RIncDec__ Increase/Decrease step for threshold values. + % default 0.005 + % * __NoiseRemovalThresholdFacBG__ Strength of the noise removal + % for background points. default 0.0004 + % * __NoiseRemovalThresholdFacFG__ Strength of the noise removal + % for foreground points. default 0.0008 + % * __LSBPThreshold__ Threshold for LSBP binary string. default 8 + % * __MinCount__ Minimal number of matches for sample to be + % considered as foreground. default 2 + % + % See also: cv.BackgroundSubtractorLSBP + % + this.id = BackgroundSubtractorLSBP_(0, 'new', varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % bs.delete() + % + % See also: cv.BackgroundSubtractorLSBP + % + if isempty(this.id), return; end + BackgroundSubtractorLSBP_(this.id, 'delete'); + end + + function fgmask = apply(this, im, varargin) + %APPLY Updates the background model and computes the foreground mask + % + % fgmask = bs.apply(im) + % fgmask = bs.apply(im, 'OptionName', optionValue, ...) + % + % ## Input + % * __im__ Next video frame. + % + % ## Output + % * __fgmask__ The output foreground mask as an 8-bit binary image + % (0 for background, 255 for foregound). + % + % ## Options + % * __LearningRate__ The value between 0 and 1 that indicates how + % fast the background model is learnt. Negative parameter value + % makes the algorithm to use some automatically chosen learning + % rate. 0 means that the background model is not updated at all, + % 1 means that the background model is completely reinitialized + % from the last frame. 
default -1 + % + % See also: cv.BackgroundSubtractorLSBP.getBackgroundImage + % + fgmask = BackgroundSubtractorLSBP_(this.id, 'apply', im, varargin{:}); + end + + function bgImg = getBackgroundImage(this) + %GETBACKGROUNDIMAGE Computes a background image + % + % bgImg = bs.getBackgroundImage() + % + % ## Output + % * __bgImg__ The output background image. + % + % See also: cv.BackgroundSubtractorLSBP.apply + % + bgImg = BackgroundSubtractorLSBP_(this.id, 'getBackgroundImage'); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.BackgroundSubtractorLSBP.empty + % + BackgroundSubtractorLSBP_(this.id, 'clear'); + end + + function b = empty(this) + %EMPTY Returns true if the algorithm is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the algorithm is empty (e.g. in the very + % beginning or after unsuccessful read). + % + % See also: cv.BackgroundSubtractorLSBP.clear + % + b = BackgroundSubtractorLSBP_(this.id, 'empty'); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. + % + % See also: cv.BackgroundSubtractorLSBP.save, + % cv.BackgroundSubtractorLSBP.load + % + name = BackgroundSubtractorLSBP_(this.id, 'getDefaultName'); + end + + function save(this, filename) + %SAVE Saves the algorithm to a file + % + % obj.save(filename) + % + % ## Input + % * __filename__ Name of the file to save to. + % + % This method stores the algorithm parameters in a file storage. + % + % See also: cv.BackgroundSubtractorLSBP.load + % + BackgroundSubtractorLSBP_(this.id, 'save', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads algorithm from a file or a string + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __fname__ Name of the file to read. + % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __ObjName__ The optional name of the node to read (if empty, + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % This method reads algorithm parameters from a file storage. + % The previous model state is discarded. 
+ % + % See also: cv.BackgroundSubtractorLSBP.save + % + BackgroundSubtractorLSBP_(this.id, 'load', fname_or_str, varargin{:}); + end + end + + %% BackgroundSubtractorLSBPDesc + methods (Static) + function desc = computeLSBPDesc(frame, LSBPSamplePoints) + %COMPUTELSBPDESC This is for calculation of the LSBP descriptors + % + % desc = cv.BackgroundSubtractorLSBP.computeLSBPDesc(frame, LSBPSamplePoints) + % + % ## Input + % * __frame__ input frame + % * __LSBPSamplePoints__ 32 sample points + % + % ## Output + % * __desc__ LSBP descriptors + % + desc = BackgroundSubtractorLSBP_(0, 'computeLSBPDesc', frame, LSBPSamplePoints); + end + end + +end diff --git a/opencv_contrib/+cv/SyntheticSequenceGenerator.m b/opencv_contrib/+cv/SyntheticSequenceGenerator.m new file mode 100644 index 000000000..020d162fe --- /dev/null +++ b/opencv_contrib/+cv/SyntheticSequenceGenerator.m @@ -0,0 +1,156 @@ +classdef SyntheticSequenceGenerator < handle + %SYNTHETICSEQUENCEGENERATOR Synthetic frame sequence generator for testing background subtraction algorithms + % + % It will generate the moving object on top of the background. + % It will apply some distortion to the background to make the test more + % complex. + % + % See also: cv.BackgroundSubtractorLSBP, cv.BackgroundSubtractorGSOC + % + + properties (SetAccess = private) + % Object ID + id + end + + %% SyntheticSequenceGenerator + methods + function this = SyntheticSequenceGenerator(background, object, varargin) + %SYNTHETICSEQUENCEGENERATOR Creates an instance of SyntheticSequenceGenerator + % + % obj = cv.SyntheticSequenceGenerator(background, object) + % obj = cv.SyntheticSequenceGenerator(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __background__ Background image for object. + % * __object__ Object image which will move slowly over the + % background. + % + % ## Options + % * __Amplitude__ Amplitude of wave distortion applied to + % background. default 2.0 + % * __Wavelength__ Length of waves in distortion applied to + % background. default 20.0 + % * __Wavespeed__ How fast waves will move. default 0.2 + % * __Objspeed__ How fast object will fly over background. + % default 6.0 + % + % See also: cv.SyntheticSequenceGenerator.getNextFrame + % + this.id = SyntheticSequenceGenerator_(0, 'new', background, object, varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % obj.delete() + % + % See also: cv.SyntheticSequenceGenerator + % + if isempty(this.id), return; end + SyntheticSequenceGenerator_(this.id, 'delete'); + end + + function [frame, gtMask] = getNextFrame(this) + %GETNEXTFRAME Obtain the next frame in the sequence + % + % [frame, gtMask] = obj.getNextFrame() + % + % ## Output + % * __frame__ Output frame. + % * __gtMask__ Output ground-truth (reference) segmentation mask + % object/background. + % + [frame, gtMask] = SyntheticSequenceGenerator_(this.id, 'getNextFrame'); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.SyntheticSequenceGenerator.empty, + % cv.SyntheticSequenceGenerator.load + % + SyntheticSequenceGenerator_(this.id, 'clear'); + end + + function b = empty(this) + %EMPTY Returns true if the algorithm is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the object is empty (e.g in the + % very beginning or after unsuccessful read). 
+ % + % See also: cv.SyntheticSequenceGenerator.clear, + % cv.SyntheticSequenceGenerator.load + % + b = SyntheticSequenceGenerator_(this.id, 'empty'); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. + % + % See also: cv.SyntheticSequenceGenerator.save, + % cv.SyntheticSequenceGenerator.load + % + name = SyntheticSequenceGenerator_(this.id, 'getDefaultName'); + end + + function save(this, filename) + %SAVE Saves the algorithm parameters to a file + % + % obj.save(filename) + % + % ## Input + % * __filename__ Name of the file to save to. + % + % This method stores the algorithm parameters in the specified + % XML or YAML file. + % + % See also: cv.SyntheticSequenceGenerator.load + % + SyntheticSequenceGenerator_(this.id, 'save', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads algorithm from a file or a string + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __fname__ Name of the file to read. + % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __ObjName__ The optional name of the node to read (if empty, + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % This method reads algorithm parameters from the specified XML or + % YAML file (either from disk or serialized string). The previous + % algorithm state is discarded. + % + % See also: cv.SyntheticSequenceGenerator.save + % + SyntheticSequenceGenerator_(this.id, 'load', fname_or_str, varargin{:}); + end + end + +end diff --git a/opencv_contrib/samples/BackgroundSubtractorDemo.m b/opencv_contrib/samples/BackgroundSubtractorDemo.m index ea5d96bc1..8d608467e 100644 --- a/opencv_contrib/samples/BackgroundSubtractorDemo.m +++ b/opencv_contrib/samples/BackgroundSubtractorDemo.m @@ -144,18 +144,23 @@ %% % Create BG subtractor object +nHistory = 50; if true - bs = cv.BackgroundSubtractorMOG(... - 'History',50, 'NMixtures',5, 'BackgroundRatio',0.2, 'NoiseSigma',7); + bs = cv.BackgroundSubtractorMOG('History',nHistory, ... + 'NMixtures',5, 'BackgroundRatio',0.2, 'NoiseSigma',7); elseif true - bs = cv.BackgroundSubtractorMOG2('History',50); + bs = cv.BackgroundSubtractorMOG2('History',nHistory); +elseif true + bs = cv.BackgroundSubtractorKNN('History',nHistory); elseif true - bs = cv.BackgroundSubtractorKNN('History',50); -elseif false bs = cv.BackgroundSubtractorGMG(... 'InitializationFrames',20, 'DecisionThreshold',0.7); -elseif false +elseif true bs = cv.BackgroundSubtractorCNT(); +elseif true + bs = cv.BackgroundSubtractorLSBP(); +else + bs = cv.BackgroundSubtractorGSOC(); end %% @@ -180,7 +185,7 @@ % used for updating the background, or use -1 to instruct the algorithm % to automatically chose a learning rate. 
hImg = imshow(im); -for t = 1:bs.History +for t = 1:nHistory % Get an image im = cap.read(); if isempty(im), break; end @@ -191,7 +196,7 @@ % Show current frame and progress set(hImg, 'CData',im); - title(sprintf('%d / %d', t, bs.History)); + title(sprintf('%d / %d', t, nHistory)); drawnow; end disp('Finished.'); diff --git a/opencv_contrib/samples/bgsegm_synthetic_seq_demo.m b/opencv_contrib/samples/bgsegm_synthetic_seq_demo.m new file mode 100644 index 000000000..a3b8ea72d --- /dev/null +++ b/opencv_contrib/samples/bgsegm_synthetic_seq_demo.m @@ -0,0 +1,107 @@ +%% Evaluation of background subtraction algorithms on synthetic sequence +% +% Sources: +% +% * +% * +% * +% + +%% +% several presets with different settings are available, +% tradeoff between quality metrics and speed. +if true + % GSOC + bs = cv.BackgroundSubtractorGSOC(); +elseif true + % GSOC-camera-motion-compensation + bs = cv.BackgroundSubtractorGSOC('MotionCompensation','LK'); +elseif true + % LSBP-vanilla + bs = cv.BackgroundSubtractorLSBP('NSamples',20, 'LSBPRadius',4, ... + 'TLower',2.0, 'TUpper',200.0, 'TInc',1.0, 'TDec',0.05, ... + 'RScale',5.0, 'RIncDec',0.05, 'LSBPThreshold',8); +elseif true + % LSBP-speed + bs = cv.BackgroundSubtractorLSBP('NSamples',10, 'LSBPRadius',16, ... + 'TLower',2.0, 'TUpper',32.0, 'TInc',1.0, 'TDec',0.05, ... + 'RScale',10.0, 'RIncDec',0.005, 'LSBPThreshold',8); +elseif true + % LSBP-quality + bs = cv.BackgroundSubtractorLSBP('NSamples',20, 'LSBPRadius',16, ... + 'TLower',2.0, 'TUpper',32.0, 'TInc',1.0, 'TDec',0.05, ... + 'RScale',10.0, 'RIncDec',0.005, 'LSBPThreshold',8); +elseif true + % LSBP-camera-motion-compensation + bs = cv.BackgroundSubtractorLSBP('MotionCompensation','LK'); +elseif true + % MOG2 + bs = cv.BackgroundSubtractorMOG2(); +elseif true + % KNN + bs = cv.BackgroundSubtractorKNN(); +elseif true + % MOG + bs = cv.BackgroundSubtractorMOG(); +elseif true + % GMG + bs = cv.BackgroundSubtractorGMG(); +else + % CNT + bs = cv.BackgroundSubtractorCNT(); +end + +%% +% initialize frame sequence generator +bg = imread(fullfile(mexopencv.root(), 'test', 'fruits.jpg')); +fg = imread(fullfile(mexopencv.root(), 'test', 'img001.jpg')); +fg = cv.resize(fg, [100 100]); +gen = cv.SyntheticSequenceGenerator(bg, fg); + +%% +% prepare UI +[frame, gtMask] = gen.getNextFrame(); +subplot(221), hImg(1) = imshow(frame); title('frame') +subplot(222), hImg(2) = imshow(gtMask); title('ground-truth mask') +subplot(223), hImg(3) = imshow(frame); title('BG model') +subplot(224), hImg(4) = imshow(gtMask); title('FG mask') + +%% +% main loop +f1 = 0; +count = 0; +for n=1:400 + % grab new frame and ground-truth mask + [frame, gtMask] = gen.getNextFrame(); + + % background subtraction + mask = bs.apply(frame); + try + im = bs.getBackgroundImage(); + catch + % some algorithms dont implement a BG model (MOG, GMG) + im = []; + end + + % show results + set(hImg(1), 'CData',frame) + set(hImg(2), 'CData',gtMask) + set(hImg(3), 'CData',im) + set(hImg(4), 'CData',mask) + drawnow + + % give the algorithm some time for proper background model inference. + % Almost all background subtraction algorithms have a problem with cold + % start and require some time for background model initialization. + % So we will not count first part of the frames in the score. 
+ if n > 300 + tp = nnz(cv.bitwise_and(mask == 255, gtMask == 255)); + fp = nnz(cv.bitwise_and(mask == 255, gtMask == 0)); + fn = nnz(cv.bitwise_and(mask == 0, gtMask == 255)); + if (tp + fn + fp) > 0 + f1 = f1 + 2*tp / (2*tp + fn + fp); + count = count + 1; + xlabel(sprintf('avg F1 score = %.2f', f1 / count)) + end + end +end diff --git a/opencv_contrib/src/+cv/private/BackgroundSubtractorGSOC_.cpp b/opencv_contrib/src/+cv/private/BackgroundSubtractorGSOC_.cpp new file mode 100644 index 000000000..47f2d891d --- /dev/null +++ b/opencv_contrib/src/+cv/private/BackgroundSubtractorGSOC_.cpp @@ -0,0 +1,176 @@ +/** + * @file BackgroundSubtractorGSOC_.cpp + * @brief mex interface for cv::bgsegm::BackgroundSubtractorGSOC + * @ingroup bgsegm + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/bgsegm.hpp" +using namespace std; +using namespace cv; +using namespace cv::bgsegm; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// Object container +map > obj_; + +/// motion compensation types for option processing +const ConstMap MotionCompensationsMap = ConstMap + ("None", cv::bgsegm::LSBP_CAMERA_MOTION_COMPENSATION_NONE) + ("LK", cv::bgsegm::LSBP_CAMERA_MOTION_COMPENSATION_LK); +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=2 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + int id = rhs[0].toInt(); + string method(rhs[1].toString()); + + // constructor call + if (method == "new") { + nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1); + int mc = cv::bgsegm::LSBP_CAMERA_MOTION_COMPENSATION_NONE; + int nSamples = 20; + float replaceRate = 0.003f; + float propagationRate = 0.01f; + int hitsThreshold = 32; + float alpha = 0.01f; + float beta = 0.0022f; + float blinkingSupressionDecay = 0.1f; + float blinkingSupressionMultiplier = 0.1f; + float noiseRemovalThresholdFacBG = 0.0004f; + float noiseRemovalThresholdFacFG = 0.0008f; + for (int i=2; i obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==2 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==2 && nlhs==0); + obj->clear(); + } + else if (method == "save") { + nargchk(nrhs==3 && nlhs==0); + obj->save(rhs[2].toString()); + } + else if (method == "load") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs==0); + string objname; + bool loadFromString = false; + for (int i=3; i(rhs[2].toString(), objname) : + Algorithm::load(rhs[2].toString(), objname)); + */ + ///* + // HACK: workaround for missing BackgroundSubtractorGSOC::create() + FileStorage fs(rhs[2].toString(), FileStorage::READ + + (loadFromString ? FileStorage::MEMORY : 0)); + if (!fs.isOpened()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + FileNode fn(objname.empty() ? 
fs.getFirstTopLevelNode() : fs[objname]); + if (fn.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to get node"); + obj->read(fn); + //*/ + } + else if (method == "empty") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "apply") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=1); + double learningRate = -1; + for (int i=3; iapply(image, fgmask, learningRate); + plhs[0] = MxArray(fgmask); + } + else if (method == "getBackgroundImage") { + nargchk(nrhs==2 && nlhs<=1); + Mat backgroundImage; + obj->getBackgroundImage(backgroundImage); + plhs[0] = MxArray(backgroundImage); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/src/+cv/private/BackgroundSubtractorLSBP_.cpp b/opencv_contrib/src/+cv/private/BackgroundSubtractorLSBP_.cpp new file mode 100644 index 000000000..4ad84e18e --- /dev/null +++ b/opencv_contrib/src/+cv/private/BackgroundSubtractorLSBP_.cpp @@ -0,0 +1,194 @@ +/** + * @file BackgroundSubtractorLSBP_.cpp + * @brief mex interface for cv::bgsegm::BackgroundSubtractorLSBP + * @ingroup bgsegm + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/bgsegm.hpp" +using namespace std; +using namespace cv; +using namespace cv::bgsegm; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// Object container +map > obj_; + +/// motion compensation types for option processing +const ConstMap MotionCompensationsMap = ConstMap + ("None", cv::bgsegm::LSBP_CAMERA_MOTION_COMPENSATION_NONE) + ("LK", cv::bgsegm::LSBP_CAMERA_MOTION_COMPENSATION_LK); +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=2 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + int id = rhs[0].toInt(); + string method(rhs[1].toString()); + + // constructor call + if (method == "new") { + nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1); + int mc = cv::bgsegm::LSBP_CAMERA_MOTION_COMPENSATION_NONE; + int nSamples = 20; + int LSBPRadius = 16; + float Tlower = 2.0f; + float Tupper = 32.0f; + float Tinc = 1.0f; + float Tdec = 0.05f; + float Rscale = 10.0f; + float Rincdec = 0.005f; + float noiseRemovalThresholdFacBG = 0.0004f; + float noiseRemovalThresholdFacFG = 0.0008f; + int LSBPthreshold = 8; + int minCount = 2; + for (int i=2; i LSBPSamplePoints(rhs[3].toVector()); + if (LSBPSamplePoints.size() != 32) + mexErrMsgIdAndTxt("mexopencv:error", "32 points required"); + BackgroundSubtractorLSBPDesc::compute(desc, frame, &LSBPSamplePoints[0]); + plhs[0] = MxArray(desc); + return; + } + + // Big operation switch + Ptr obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==2 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==2 && nlhs==0); + obj->clear(); + } + else if (method == "save") { + nargchk(nrhs==3 && nlhs==0); + obj->save(rhs[2].toString()); + } + else if (method == "load") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs==0); + 
string objname; + bool loadFromString = false; + for (int i=3; i(rhs[2].toString(), objname) : + Algorithm::load(rhs[2].toString(), objname)); + */ + ///* + // HACK: workaround for missing BackgroundSubtractorLSBP::create() + FileStorage fs(rhs[2].toString(), FileStorage::READ + + (loadFromString ? FileStorage::MEMORY : 0)); + if (!fs.isOpened()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + FileNode fn(objname.empty() ? fs.getFirstTopLevelNode() : fs[objname]); + if (fn.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to get node"); + obj->read(fn); + //*/ + } + else if (method == "empty") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "apply") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=1); + double learningRate = -1; + for (int i=3; iapply(image, fgmask, learningRate); + plhs[0] = MxArray(fgmask); + } + else if (method == "getBackgroundImage") { + nargchk(nrhs==2 && nlhs<=1); + Mat backgroundImage; + obj->getBackgroundImage(backgroundImage); + plhs[0] = MxArray(backgroundImage); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/src/+cv/private/SyntheticSequenceGenerator_.cpp b/opencv_contrib/src/+cv/private/SyntheticSequenceGenerator_.cpp new file mode 100644 index 000000000..3a09c5d8e --- /dev/null +++ b/opencv_contrib/src/+cv/private/SyntheticSequenceGenerator_.cpp @@ -0,0 +1,136 @@ +/** + * @file SyntheticSequenceGenerator_.cpp + * @brief mex interface for cv::bgsegm::SyntheticSequenceGenerator + * @ingroup bgsegm + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/bgsegm.hpp" +using namespace std; +using namespace cv; +using namespace cv::bgsegm; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// Object container +map > obj_; +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=2 && nlhs<=2); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + int id = rhs[0].toInt(); + string method(rhs[1].toString()); + + // constructor call + if (method == "new") { + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=1); + double amplitude = 2.0; + double wavelength = 20.0; + double wavespeed = 0.2; + double objspeed = 6.0; + for (int i=4; i obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==2 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==2 && nlhs==0); + obj->clear(); + } + else if (method == "load") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs==0); + string objname; + bool loadFromString = false; + for (int i=3; i(rhs[2].toString(), objname) : + Algorithm::load(rhs[2].toString(), objname)); + */ + ///* + // HACK: workaround for missing SyntheticSequenceGenerator::create() + FileStorage fs(rhs[2].toString(), FileStorage::READ + + (loadFromString ? 
FileStorage::MEMORY : 0)); + if (!fs.isOpened()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + FileNode fn(objname.empty() ? fs.getFirstTopLevelNode() : fs[objname]); + if (fn.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to get node"); + obj->read(fn); + //*/ + } + else if (method == "save") { + nargchk(nrhs==3 && nlhs==0); + obj->save(rhs[2].toString()); + } + else if (method == "empty") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "getNextFrame") { + nargchk(nrhs==2 && nlhs<=2); + Mat frame, gtMask; + obj->getNextFrame(frame, gtMask); + plhs[0] = MxArray(frame); + if (nlhs > 1) + plhs[1] = MxArray(gtMask); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/test/unit_tests/TestBackgroundSubtractorGSOC.m b/opencv_contrib/test/unit_tests/TestBackgroundSubtractorGSOC.m new file mode 100644 index 000000000..e5d96964a --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestBackgroundSubtractorGSOC.m @@ -0,0 +1,24 @@ +classdef TestBackgroundSubtractorGSOC + %TestBackgroundSubtractorGSOC + + methods (Static) + function test_1 + frame = randi([0 255], [50 50 3], 'uint8'); + sz = size(frame); + + bs = cv.BackgroundSubtractorGSOC(); + for i=1:10 + fgmask = bs.apply(frame, 'LearningRate',-1); + end + + frame(1:10,1:10,:) = 255; + fgmask = bs.apply(frame, 'LearningRate',0); + validateattributes(fgmask, {'uint8'}, {'size',sz(1:2)}); + assert(numel(unique(fgmask)) <= 2); % 0=bg, 255=fg + + bg = bs.getBackgroundImage(); + validateattributes(bg, {'uint8'}, {'size',sz}); + end + end + +end diff --git a/opencv_contrib/test/unit_tests/TestBackgroundSubtractorLSBP.m b/opencv_contrib/test/unit_tests/TestBackgroundSubtractorLSBP.m new file mode 100644 index 000000000..22059f0ff --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestBackgroundSubtractorLSBP.m @@ -0,0 +1,37 @@ +classdef TestBackgroundSubtractorLSBP + %TestBackgroundSubtractorLSBP + + methods (Static) + function test_1 + frame = randi([0 255], [50 50 3], 'uint8'); + sz = size(frame); + + bs = cv.BackgroundSubtractorLSBP(); + for i=1:10 + fgmask = bs.apply(frame, 'LearningRate',-1); + end + + frame(1:10,1:10,:) = 255; + fgmask = bs.apply(frame, 'LearningRate',0); + validateattributes(fgmask, {'uint8'}, {'size',sz(1:2)}); + assert(numel(unique(fgmask)) <= 2); % 0=bg, 255=fg + + bg = bs.getBackgroundImage(); + validateattributes(bg, {'uint8'}, {'size',sz}); + end + + function test_2 + img = imread(fullfile(mexopencv.root(), 'test', 'lena.jpg')); + img = single(img) / 255; + sz = size(img); + + radius = 4; + phi = 2*pi * (0:31)'/32; + pts = fix(radius * [cos(phi) sin(phi)]); + + desc = cv.BackgroundSubtractorLSBP.computeLSBPDesc(img, pts); + validateattributes(desc, {'int32'}, {'size',sz(1:2)}); + end + end + +end diff --git a/opencv_contrib/test/unit_tests/TestSyntheticSequenceGenerator.m b/opencv_contrib/test/unit_tests/TestSyntheticSequenceGenerator.m new file mode 100644 index 000000000..05ebcdb54 --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestSyntheticSequenceGenerator.m @@ -0,0 +1,17 @@ +classdef TestSyntheticSequenceGenerator + %TestSyntheticSequenceGenerator + + methods (Static) + function test_1 + bg = imread(fullfile(mexopencv.root(), 'test', 'fruits.jpg')); + fg = imread(fullfile(mexopencv.root(), 'test', 'img001.jpg')); + fg = cv.resize(fg, [100, 100]); + + gen 
= cv.SyntheticSequenceGenerator(bg, fg); + for i=1:5 + [frame, gtMask] = gen.getNextFrame(); + end + end + end + +end From 1eb7b835cb149d15b96be9a2f0fee45ee8091758 Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 16:48:45 +0200 Subject: [PATCH 20/36] ximgproc: new EdgeBoxes --- opencv_contrib/+cv/EdgeBoxes.m | 278 ++++++++++++++++++ .../samples/structured_edge_detection_demo.m | 19 +- opencv_contrib/src/+cv/private/EdgeBoxes_.cpp | 221 ++++++++++++++ .../test/unit_tests/TestEdgeBoxes.m | 35 +++ 4 files changed, 549 insertions(+), 4 deletions(-) create mode 100644 opencv_contrib/+cv/EdgeBoxes.m create mode 100644 opencv_contrib/src/+cv/private/EdgeBoxes_.cpp create mode 100644 opencv_contrib/test/unit_tests/TestEdgeBoxes.m diff --git a/opencv_contrib/+cv/EdgeBoxes.m b/opencv_contrib/+cv/EdgeBoxes.m new file mode 100644 index 000000000..40d9b91db --- /dev/null +++ b/opencv_contrib/+cv/EdgeBoxes.m @@ -0,0 +1,278 @@ +classdef EdgeBoxes < handle + %EDGEBOXES Class implementing Edge Boxes algorithm + % + % Algorithm from [ZitnickECCV14edgeBoxes]. + % + % ## References + % [ZitnickECCV14edgeBoxes]: + % > C. Lawrence Zitnick and Piotr Dollar. "Edge boxes: Locating object + % > proposals from edges". In ECCV, 2014. + % > [PDF](https://www.microsoft.com/en-us/research/wp-content/uploads/2014/09/ZitnickDollarECCV14edgeBoxes.pdf) + % + % See also: cv.EdgeBoxes.EdgeBoxes, cv.StructuredEdgeDetection + % + + properties (SetAccess = private) + % Object ID + id + end + + properties (Dependent) + % The step size of sliding window search. + Alpha + % The NMS threshold for object proposals. + Beta + % The adaptation rate for NMS threshold. + Eta + % The min score of boxes to detect. + MinScore + % The max number of boxes to detect. + MaxBoxes + % The edge min magnitude. + EdgeMinMag + % The edge merge threshold. + EdgeMergeThr + % The cluster min magnitude. + ClusterMinMag + % The max aspect ratio of boxes. + MaxAspectRatio + % The minimum area of boxes. + MinBoxArea + % The affinity sensitivity. + Gamma + % The scale sensitivity. + Kappa + end + + %% EdgeBoxes + methods + function this = EdgeBoxes(varargin) + %EDGEBOXES Creates instance of Edgeboxes + % + % obj = cv.EdgeBoxes() + % obj = cv.EdgeBoxes('OptionName',optionValue, ...) + % + % ## Options + % * __Alpha__ step size of sliding window search. default 0.65 + % * __Beta__ NMS threshold for object proposals. default 0.75 + % * __Eta__ adaptation rate for NMS threshold. default 1 + % * __MinScore__ min score of boxes to detect. default 0.01 + % * __MaxBoxes__ max number of boxes to detect. default 10000 + % * __EdgeMinMag__ edge min magnitude. Increase to trade off + % accuracy for speed. default 0.1 + % * __EdgeMergeThr__ edge merge threshold. Increase to trade off + % accuracy for speed. default 0.5 + % * __ClusterMinMag__ cluster min magnitude. Increase to trade off + % accuracy for speed. default 0.5 + % * __MaxAspectRatio__ max aspect ratio of boxes. default 3 + % * __MinBoxArea__ minimum area of boxes. default 1000 + % * __Gamma__ affinity sensitivity. default 2 + % * __Kappa__ scale sensitivity. default 1.5 + % + % See also: cv.EdgeBoxes.getBoundingBoxes + % + this.id = EdgeBoxes_(0, 'new', varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % obj.delete() + % + % See also: cv.EdgeBoxes + % + if isempty(this.id), return; end + EdgeBoxes_(this.id, 'delete'); + end + + function boxes = getBoundingBoxes(this, edgeMap, orientationMap) + %GETBOUNDINGBOXES Returns array containing proposal boxes. 
+ % + % boxes = obj.getBoundingBoxes(edgeMap, orientationMap) + % + % ## Input + % * __edgeMap__ edge image. + % * __orientationMap__ orientation map. + % + % ## Output + % * __boxes__ proposal boxes. + % + % See also: cv.EdgeBoxes.amFilter + % + boxes = EdgeBoxes_(this.id, 'getBoundingBoxes', edgeMap, orientationMap); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.EdgeBoxes.empty, cv.EdgeBoxes.load + % + EdgeBoxes_(this.id, 'clear'); + end + + function b = empty(this) + %EMPTY Checks if algorithm object is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the algorithm object is empty + % (e.g. in the very beginning or after unsuccessful read). + % + % See also: cv.EdgeBoxes.clear, cv.EdgeBoxes.load + % + b = EdgeBoxes_(this.id, 'empty'); + end + + function save(this, filename) + %SAVE Saves the algorithm parameters to a file + % + % obj.save(filename) + % + % ## Input + % * __filename__ Name of the file to save to. + % + % This method stores the algorithm parameters in the specified + % XML or YAML file. + % + % See also: cv.EdgeBoxes.load + % + EdgeBoxes_(this.id, 'save', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads algorithm from a file or a string + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __fname__ Name of the file to read. + % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __ObjName__ The optional name of the node to read (if empty, + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % This method reads algorithm parameters from the specified XML or + % YAML file (either from disk or serialized string). The previous + % algorithm state is discarded. + % + % See also: cv.EdgeBoxes.save + % + EdgeBoxes_(this.id, 'load', fname_or_str, varargin{:}); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. 
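The dependent properties can also be adjusted after construction; a short sketch with illustrative values:
ebx = cv.EdgeBoxes();     % defaults as listed above
ebx.MaxBoxes = 50;        % keep at most 50 proposals
ebx.MinScore = 0.05;      % drop low-scoring boxes
ebx.MaxAspectRatio = 2;   % reject very elongated boxes
% boxes = ebx.getBoundingBoxes(edges_nms, orientation_map);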
+ % + % See also: cv.EdgeBoxes.save, cv.EdgeBoxes.load + % + name = EdgeBoxes_(this.id, 'getDefaultName'); + end + end + + %% Getters/Setters + methods + function value = get.Alpha(this) + value = EdgeBoxes_(this.id, 'get', 'Alpha'); + end + function set.Alpha(this, value) + EdgeBoxes_(this.id, 'set', 'Alpha', value); + end + + function value = get.Beta(this) + value = EdgeBoxes_(this.id, 'get', 'Beta'); + end + function set.Beta(this, value) + EdgeBoxes_(this.id, 'set', 'Beta', value); + end + + function value = get.Eta(this) + value = EdgeBoxes_(this.id, 'get', 'Eta'); + end + function set.Eta(this, value) + EdgeBoxes_(this.id, 'set', 'Eta', value); + end + + function value = get.MinScore(this) + value = EdgeBoxes_(this.id, 'get', 'MinScore'); + end + function set.MinScore(this, value) + EdgeBoxes_(this.id, 'set', 'MinScore', value); + end + + function value = get.MaxBoxes(this) + value = EdgeBoxes_(this.id, 'get', 'MaxBoxes'); + end + function set.MaxBoxes(this, value) + EdgeBoxes_(this.id, 'set', 'MaxBoxes', value); + end + + function value = get.EdgeMinMag(this) + value = EdgeBoxes_(this.id, 'get', 'EdgeMinMag'); + end + function set.EdgeMinMag(this, value) + EdgeBoxes_(this.id, 'set', 'EdgeMinMag', value); + end + + function value = get.EdgeMergeThr(this) + value = EdgeBoxes_(this.id, 'get', 'EdgeMergeThr'); + end + function set.EdgeMergeThr(this, value) + EdgeBoxes_(this.id, 'set', 'EdgeMergeThr', value); + end + + function value = get.ClusterMinMag(this) + value = EdgeBoxes_(this.id, 'get', 'ClusterMinMag'); + end + function set.ClusterMinMag(this, value) + EdgeBoxes_(this.id, 'set', 'ClusterMinMag', value); + end + + function value = get.MaxAspectRatio(this) + value = EdgeBoxes_(this.id, 'get', 'MaxAspectRatio'); + end + function set.MaxAspectRatio(this, value) + EdgeBoxes_(this.id, 'set', 'MaxAspectRatio', value); + end + + function value = get.MinBoxArea(this) + value = EdgeBoxes_(this.id, 'get', 'MinBoxArea'); + end + function set.MinBoxArea(this, value) + EdgeBoxes_(this.id, 'set', 'MinBoxArea', value); + end + + function value = get.Gamma(this) + value = EdgeBoxes_(this.id, 'get', 'Gamma'); + end + function set.Gamma(this, value) + EdgeBoxes_(this.id, 'set', 'Gamma', value); + end + + function value = get.Kappa(this) + value = EdgeBoxes_(this.id, 'get', 'Kappa'); + end + function set.Kappa(this, value) + EdgeBoxes_(this.id, 'set', 'Kappa', value); + end + end + +end diff --git a/opencv_contrib/samples/structured_edge_detection_demo.m b/opencv_contrib/samples/structured_edge_detection_demo.m index b3eec8a92..1cf8b48b4 100644 --- a/opencv_contrib/samples/structured_edge_detection_demo.m +++ b/opencv_contrib/samples/structured_edge_detection_demo.m @@ -1,12 +1,15 @@ %% Structured Edge Detection demo -% This sample demonstrates structured forests for fast edge detection. +% This sample demonstrates structured forests for fast edge detection, and +% edgeboxes. % % The structered edge demo requires you to provide a model. % This demo downloads a model from the opencv_extra repository on Github. 
% % Sources: % -% * +% * +% * +% * % %% Load image @@ -46,9 +49,17 @@ %% % suppress edges -edge_nms = pDollar.edgesNms(edges, orientation_map); +edges_nms = pDollar.edgesNms(edges, orientation_map); + +%% +% generate object bounding box proposals using edges +ebx = cv.EdgeBoxes('MaxBoxes',30); +boxes = ebx.getBoundingBoxes(edges_nms, orientation_map); +out = cv.rectangle(img, boxes, 'Color',[0 255 0], 'LineType','AA'); %% Display result +figure('Position',[200 200 800 600]) subplot(221), imshow(img), title('image') subplot(222), imshow(e8u), title('edges') -subplot(223), imshow(edge_nms), title('edges NMS') +subplot(223), imshow(edges_nms), title('edges NMS') +subplot(224), imshow(out), title('object proposals') diff --git a/opencv_contrib/src/+cv/private/EdgeBoxes_.cpp b/opencv_contrib/src/+cv/private/EdgeBoxes_.cpp new file mode 100644 index 000000000..741c7d51b --- /dev/null +++ b/opencv_contrib/src/+cv/private/EdgeBoxes_.cpp @@ -0,0 +1,221 @@ +/** + * @file EdgeBoxes_.cpp + * @brief mex interface for cv::ximgproc::EdgeBoxes + * @ingroup ximgproc + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/ximgproc.hpp" +using namespace std; +using namespace cv; +using namespace cv::ximgproc; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// Object container +map > obj_; +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=2 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + int id = rhs[0].toInt(); + string method(rhs[1].toString()); + + // Constructor is called. Create a new object from argument + if (method == "new") { + nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1); + float alpha = 0.65f; + float beta = 0.75f; + float eta = 1; + float minScore = 0.01f; + int maxBoxes = 10000; + float edgeMinMag = 0.1f; + float edgeMergeThr = 0.5f; + float clusterMinMag = 0.5f; + float maxAspectRatio = 3; + float minBoxArea = 1000; + float gamma = 2; + float kappa = 1.5f; + for (int i=2; i obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==2 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==2 && nlhs==0); + obj->clear(); + } + else if (method == "load") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs==0); + string objname; + bool loadFromString = false; + for (int i=3; i(rhs[2].toString(), objname) : + Algorithm::load(rhs[2].toString(), objname)); + */ + ///* + // HACK: workaround for missing EdgeBoxes::create() + FileStorage fs(rhs[2].toString(), FileStorage::READ + + (loadFromString ? FileStorage::MEMORY : 0)); + if (!fs.isOpened()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + FileNode fn(objname.empty() ? 
fs.getFirstTopLevelNode() : fs[objname]); + if (fn.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to get node"); + obj->read(fn); + //*/ + } + else if (method == "save") { + nargchk(nrhs==3 && nlhs==0); + obj->save(rhs[2].toString()); + } + else if (method == "empty") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "getBoundingBoxes") { + nargchk(nrhs==4 && nlhs<=1); + Mat edge_map(rhs[2].toMat(CV_32F)), + orientation_map(rhs[3].toMat(CV_32F)); + vector boxes; + obj->getBoundingBoxes(edge_map, orientation_map, boxes); + plhs[0] = MxArray(boxes); + } + else if (method == "get") { + nargchk(nrhs==3 && nlhs<=1); + string prop(rhs[2].toString()); + if (prop == "Alpha") + plhs[0] = MxArray(obj->getAlpha()); + else if (prop == "Beta") + plhs[0] = MxArray(obj->getBeta()); + else if (prop == "Eta") + plhs[0] = MxArray(obj->getEta()); + else if (prop == "MinScore") + plhs[0] = MxArray(obj->getMinScore()); + else if (prop == "MaxBoxes") + plhs[0] = MxArray(obj->getMaxBoxes()); + else if (prop == "EdgeMinMag") + plhs[0] = MxArray(obj->getEdgeMinMag()); + else if (prop == "EdgeMergeThr") + plhs[0] = MxArray(obj->getEdgeMergeThr()); + else if (prop == "ClusterMinMag") + plhs[0] = MxArray(obj->getClusterMinMag()); + else if (prop == "MaxAspectRatio") + plhs[0] = MxArray(obj->getMaxAspectRatio()); + else if (prop == "MinBoxArea") + plhs[0] = MxArray(obj->getMinBoxArea()); + else if (prop == "Gamma") + plhs[0] = MxArray(obj->getGamma()); + else if (prop == "Kappa") + plhs[0] = MxArray(obj->getKappa()); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized property %s", prop.c_str()); + } + else if (method == "set") { + nargchk(nrhs==4 && nlhs==0); + string prop(rhs[2].toString()); + if (prop == "Alpha") + obj->setAlpha(rhs[3].toFloat()); + else if (prop == "Beta") + obj->setBeta(rhs[3].toFloat()); + else if (prop == "Eta") + obj->setEta(rhs[3].toFloat()); + else if (prop == "MinScore") + obj->setMinScore(rhs[3].toFloat()); + else if (prop == "MaxBoxes") + obj->setMaxBoxes(rhs[3].toInt()); + else if (prop == "EdgeMinMag") + obj->setEdgeMinMag(rhs[3].toFloat()); + else if (prop == "EdgeMergeThr") + obj->setEdgeMergeThr(rhs[3].toFloat()); + else if (prop == "ClusterMinMag") + obj->setClusterMinMag(rhs[3].toFloat()); + else if (prop == "MaxAspectRatio") + obj->setMaxAspectRatio(rhs[3].toFloat()); + else if (prop == "MinBoxArea") + obj->setMinBoxArea(rhs[3].toFloat()); + else if (prop == "Gamma") + obj->setGamma(rhs[3].toFloat()); + else if (prop == "Kappa") + obj->setKappa(rhs[3].toFloat()); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized property %s", prop.c_str()); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/test/unit_tests/TestEdgeBoxes.m b/opencv_contrib/test/unit_tests/TestEdgeBoxes.m new file mode 100644 index 000000000..2fda9cc4c --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestEdgeBoxes.m @@ -0,0 +1,35 @@ +classdef TestEdgeBoxes + %TestEdgeBoxes + + methods (Static) + function test_1 + [E,O] = get_inputs(); + obj = cv.EdgeBoxes('MaxBoxes',30); + obj.MaxBoxes = 30; + boxes = obj.getBoundingBoxes(E, O); + validateattributes(boxes, {'cell'}, {'vector'}); + cellfun(@(r) validateattributes(r, {'numeric'}, ... 
+ {'vector', 'numel',4}), boxes); + end + end + +end + +function [E,O] = get_inputs() + img = imread(fullfile(mexopencv.root(),'test','balloon.jpg')); + img = single(img) / 255; + + pDollar = cv.StructuredEdgeDetection(get_model_file()); + E = pDollar.detectEdges(img); + O = pDollar.computeOrientation(E); + E = pDollar.edgesNms(E, O); +end + +function fname = get_model_file() + fname = fullfile(mexopencv.root(),'test','model.yml.gz'); + if exist(fname, 'file') ~= 2 + % download model from GitHub + url = 'https://cdn.rawgit.com/opencv/opencv_extra/3.2.0/testdata/cv/ximgproc/model.yml.gz'; + urlwrite(url, fname); + end +end From 88d7068add102eb92e5e4396f200629a064c4f9b Mon Sep 17 00:00:00 2001 From: Amro Date: Wed, 31 Jan 2018 16:50:52 +0200 Subject: [PATCH 21/36] ximgproc: new RidgeDetectionFilter --- opencv_contrib/+cv/RidgeDetectionFilter.m | 166 ++++++++++++++++++ .../src/+cv/private/RidgeDetectionFilter_.cpp | 133 ++++++++++++++ .../unit_tests/TestRidgeDetectionFilter.m | 14 ++ 3 files changed, 313 insertions(+) create mode 100644 opencv_contrib/+cv/RidgeDetectionFilter.m create mode 100644 opencv_contrib/src/+cv/private/RidgeDetectionFilter_.cpp create mode 100644 opencv_contrib/test/unit_tests/TestRidgeDetectionFilter.m diff --git a/opencv_contrib/+cv/RidgeDetectionFilter.m b/opencv_contrib/+cv/RidgeDetectionFilter.m new file mode 100644 index 000000000..fb6a1ee58 --- /dev/null +++ b/opencv_contrib/+cv/RidgeDetectionFilter.m @@ -0,0 +1,166 @@ +classdef RidgeDetectionFilter < handle + %RIDGEDETECTIONFILTER Ridge Detection Filter + % + % Implements Ridge detection similar to the one in + % [Mathematica](http://reference.wolfram.com/language/ref/RidgeFilter.html) + % using the eigen values from the Hessian Matrix of the input image using + % Sobel Derivatives. Additional refinement can be done using + % Skeletonization and Binarization. + % + % See also: cv.RidgeDetectionFilter.RidgeDetectionFilter, cv.Sobel, + % cv.threshold, cv.getStructuringElement, cv.morphologyEx + % + + properties (SetAccess = private) + % Object ID + id + end + + %% RidgeDetectionFilter + methods + function this = RidgeDetectionFilter(varargin) + %RIDGEDETECTIONFILTER Creates instance of the Ridge detection filter + % + % obj = cv.RidgeDetectionFilter() + % obj = cv.RidgeDetectionFilter('OptionName',optionValue, ...) + % + % ## Options + % * __DDepth__ Specifies output image depth, one of `single` or + % `double`. Default is `single` + % * __Dx__ Order of derivative x, default is 1 + % * __Dy__ Order of derivative y, default is 1 + % * __KSize__ Sobel kernel size (1, 3, 5, or 7), default is 3 + % * __OutDType__ Converted format for output, default is `uint8` + % * __Scale__ Optional scale value for derivative values, + % default is 1 + % * __Delta__ Optional bias added to output, default is 0 + % * __BorderType__ Pixel extrapolation method. + % See cv.copyMakeBorder. Default is 'Default' + % + % Above options have the same meaning as cv.Sobel options. + % + % See also: cv.RidgeDetectionFilter.getRidgeFilteredImage + % + this.id = RidgeDetectionFilter_(0, 'new', varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % obj.delete() + % + % See also: cv.RidgeDetectionFilter + % + if isempty(this.id), return; end + RidgeDetectionFilter_(this.id, 'delete'); + end + + function out = getRidgeFilteredImage(this, img) + %GETRIDGEFILTEREDIMAGE Apply Ridge detection filter on input image + % + % out = obj.getRidgeFilteredImage(img) + % + % ## Input + % * __img__ Input array as supported by cv.Sobel. 
`img` can be + % 1-channel or 3-channels (color images are converted to + % grayscale). + % + % ## Output + % * __out__ Output array of depth as specified in `DDepth` in + % constructor. Output image with ridges. + % + % See also: cv.RidgeDetectionFilter.amFilter + % + out = RidgeDetectionFilter_(this.id, 'getRidgeFilteredImage', img); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.RidgeDetectionFilter.empty, + % cv.RidgeDetectionFilter.load + % + RidgeDetectionFilter_(this.id, 'clear'); + end + + function b = empty(this) + %EMPTY Checks if algorithm object is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the algorithm object is empty + % (e.g. in the very beginning or after unsuccessful read). + % + % See also: cv.RidgeDetectionFilter.clear, + % cv.RidgeDetectionFilter.load + % + b = RidgeDetectionFilter_(this.id, 'empty'); + end + + function save(this, filename) + %SAVE Saves the algorithm parameters to a file + % + % obj.save(filename) + % + % ## Input + % * __filename__ Name of the file to save to. + % + % This method stores the algorithm parameters in the specified + % XML or YAML file. + % + % See also: cv.RidgeDetectionFilter.load + % + RidgeDetectionFilter_(this.id, 'save', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads algorithm from a file or a string + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __fname__ Name of the file to read. + % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __ObjName__ The optional name of the node to read (if empty, + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % This method reads algorithm parameters from the specified XML or + % YAML file (either from disk or serialized string). The previous + % algorithm state is discarded. + % + % See also: cv.RidgeDetectionFilter.save + % + RidgeDetectionFilter_(this.id, 'load', fname_or_str, varargin{:}); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. 
+ % + % See also: cv.RidgeDetectionFilter.save, + % cv.RidgeDetectionFilter.load + % + name = RidgeDetectionFilter_(this.id, 'getDefaultName'); + end + end + +end diff --git a/opencv_contrib/src/+cv/private/RidgeDetectionFilter_.cpp b/opencv_contrib/src/+cv/private/RidgeDetectionFilter_.cpp new file mode 100644 index 000000000..94aaad281 --- /dev/null +++ b/opencv_contrib/src/+cv/private/RidgeDetectionFilter_.cpp @@ -0,0 +1,133 @@ +/** + * @file RidgeDetectionFilter_.cpp + * @brief mex interface for cv::ximgproc::RidgeDetectionFilter + * @ingroup ximgproc + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/ximgproc.hpp" +using namespace std; +using namespace cv; +using namespace cv::ximgproc; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// Object container +map > obj_; +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=2 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + int id = rhs[0].toInt(); + string method(rhs[1].toString()); + + // Constructor is called. Create a new object from argument + if (method == "new") { + nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1); + int ddepth = CV_32F; + int dx = 1; + int dy = 1; + int ksize = 3; + int out_dtype = CV_8U; + double scale = 1; + double delta = 0; + int borderType = cv::BORDER_DEFAULT; + for (int i=2; i obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==2 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==2 && nlhs==0); + obj->clear(); + } + else if (method == "load") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs==0); + string objname; + bool loadFromString = false; + for (int i=3; i(rhs[2].toString(), objname) : + Algorithm::load(rhs[2].toString(), objname)); + } + else if (method == "save") { + nargchk(nrhs==3 && nlhs==0); + obj->save(rhs[2].toString()); + } + else if (method == "empty") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "getRidgeFilteredImage") { + nargchk(nrhs==3 && nlhs<=1); + Mat img(rhs[2].toMat()), out; + obj->getRidgeFilteredImage(img, out); + plhs[0] = MxArray(out); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/test/unit_tests/TestRidgeDetectionFilter.m b/opencv_contrib/test/unit_tests/TestRidgeDetectionFilter.m new file mode 100644 index 000000000..2a579804b --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestRidgeDetectionFilter.m @@ -0,0 +1,14 @@ +classdef TestRidgeDetectionFilter + %TestRidgeDetectionFilter + + methods (Static) + function test_1 + img = cv.imread(fullfile(mexopencv.root(),'test','blox.jpg'), ... 
+                'Grayscale',true);
+            obj = cv.RidgeDetectionFilter('OutDType','uint8');
+            out = obj.getRidgeFilteredImage(img);
+            validateattributes(out, {'uint8'}, {'size',size(img)});
+        end
+    end
+
+end

From 0b8b77461139b480379b7aa64885f6f1c1d32415 Mon Sep 17 00:00:00 2001
From: Amro 
Date: Wed, 31 Jan 2018 16:52:17 +0200
Subject: [PATCH 22/36] ximgproc: new BrightEdges function plus sample demo

---
 opencv_contrib/+cv/BrightEdges.m              |  32 +++++
 opencv_contrib/samples/brightedges_demo_gui.m | 110 ++++++++++++++++++
 opencv_contrib/src/+cv/BrightEdges.cpp        |  50 ++++++++
 .../test/unit_tests/TestBrightEdges.m         |  13 +++
 4 files changed, 205 insertions(+)
 create mode 100644 opencv_contrib/+cv/BrightEdges.m
 create mode 100644 opencv_contrib/samples/brightedges_demo_gui.m
 create mode 100644 opencv_contrib/src/+cv/BrightEdges.cpp
 create mode 100644 opencv_contrib/test/unit_tests/TestBrightEdges.m

diff --git a/opencv_contrib/+cv/BrightEdges.m b/opencv_contrib/+cv/BrightEdges.m
new file mode 100644
index 000000000..3eb8457a1
--- /dev/null
+++ b/opencv_contrib/+cv/BrightEdges.m
@@ -0,0 +1,32 @@
+%BRIGHTEDGES Bright edges detector
+%
+% edge = cv.BrightEdges(img)
+% edge = cv.BrightEdges(img, 'OptionName',optionValue, ...)
+%
+% ## Input
+% * __img__ input color image.
+%
+% ## Output
+% * __edge__ output edge image.
+%
+% ## Options
+% * __Contrast__ default 1
+% * __ShortRange__ default 3
+% * __LongRange__ default 9
+%
+% The function implements a new way of detecting edges used in low-resolution
+% image object recognition in real projects (e.g. 50cm per pixel). It corrects
+% surfaces for objects partially under a lighting shadow, and reveals
+% low-visibility edges. The result is used to feed further object detection
+% processes, particularly in contexts where small edges or details are
+% important for object discrimination.
+%
+% The function provides an implementation of an equalized absolute difference
+% of blurs, and an optional further treatment to contrast edges based on
+% finding a local minimum along at least two directions. The local minimum
+% detection contrast is a parameter, using 10 as default (10 on 255 maximum).
+% The kernel sizes for the Gaussian and the average blur can be passed as
+% parameters too.
+%
+% See also: cv.Canny, cv.blur, cv.GaussianBlur, cv.equalizeHist
+%
diff --git a/opencv_contrib/samples/brightedges_demo_gui.m b/opencv_contrib/samples/brightedges_demo_gui.m
new file mode 100644
index 000000000..133015b5c
--- /dev/null
+++ b/opencv_contrib/samples/brightedges_demo_gui.m
@@ -0,0 +1,110 @@
+%% Bright edges detection
+%
+% A new approach for visualizing edges. It uses the absolute difference of two
+% blurs to compute gradients, creating local minima for edges. The histogram
+% equalization makes the edges appear darker within a bright zone.
+%
+% If a contrast value of zero is given, no additional processing is performed.
+%
+% Otherwise a contrasting correction finds the local minima and sets the edge
+% to black, while setting the surrounding to white. A pixel correction
+% restores line continuity and removes dark spots.
+%
+% The default contrast value is 1 over a range of 255. This default contrast
+% ensures visualizing edges on chessboard-like images or low-resolution images
+% where small object edges may be needed.
+% Other contrast values will look for deeper local minima, removing smaller
+% details and only leaving higher gradient variations.
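For reference, the non-interactive core of the demo reduces to a single call. A minimal sketch (it reuses the butterfly test image that the GUI demo below loads by default; the option values are simply the documented defaults, not special choices):

    img = cv.imread(fullfile(mexopencv.root(),'test','butterfly.jpg'), 'Color',true);
    edges = cv.BrightEdges(img, 'Contrast',1, 'ShortRange',3, 'LongRange',9);
    subplot(121), imshow(img), title('image')
    subplot(122), imshow(edges), title('bright edges')

Raising the contrast value (or widening the blur ranges) keeps only the stronger gradient variations, which is what the sliders in the GUI below expose interactively.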
+% +% Souces: +% +% * +% + +function varargout = brightedges_demo_gui(im) + % load source image + if nargin < 1 + im = fullfile(mexopencv.root(),'test','butterfly.jpg'); + img = cv.imread(im, 'Color',true); + elseif ischar(im) + img = cv.imread(im, 'Color',true); + else + img = im; + end + assert(size(img,3) == 3, 'RGB image expected'); + + % create the UI + h = buildGUI(img); + if nargout > 0, varargout{1} = h; end +end + +function onChange(~,~,h) + %ONCHANGE Event handler for UI controls + + % retrieve current values from UI controls + longRange = round(get(h.slid(1), 'Value')) * 2 + 1; + shortRange = round(get(h.slid(2), 'Value')) * 2 + 1; % odd for GaussianBlur + contrst = round(get(h.slid(3), 'Value')); + set(h.txt(1), 'String',sprintf('LongRange: %d',longRange)); + set(h.txt(2), 'String',sprintf('ShortRange: %d',shortRange)); + set(h.txt(3), 'String',sprintf('Contrast: %d',contrst)); + + % histogram equalization + out = cv.BrightEdges(h.src, ... + 'Contrast',contrst, 'ShortRange',shortRange, 'LongRange',longRange); + + % show result + set(h.img, 'CData',out); + drawnow; +end + +function h = buildGUI(img) + %BUILDGUI Creates the UI + + % parameters + contrst = 1; + shortRange = 1; + longRange = 4; + out = cv.BrightEdges(img); + sz = size(img); + sz(2) = max(sz(2), 300); % minimum figure width + + % build the user interface (no resizing to keep it simple) + h = struct(); + h.src = img; + h.fig = figure('Name','BrightEdges Demo', ... + 'NumberTitle','off', 'Menubar','none', 'Resize','off', ... + 'Position',[200 200 sz(2) sz(1)+80-1]); + if ~mexopencv.isOctave() + %HACK: not implemented in Octave + movegui(h.fig, 'center'); + end + h.ax = axes('Parent',h.fig, 'Units','pixels', 'Position',[1 80 sz(2) sz(1)]); + if ~mexopencv.isOctave() + h.img = imshow(out, 'Parent',h.ax); + else + %HACK: https://savannah.gnu.org/bugs/index.php?45473 + axes(h.ax); + h.img = imshow(out); + end + h.txt(1) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 5 130 20], 'String','LongRange:'); + h.txt(2) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 30 130 20], 'String','ShortRange:'); + h.txt(3) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 55 130 20], 'String','Contrast:'); + h.slid(1) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',longRange, 'Min',1, 'Max',30, 'SliderStep',[1 5]./(30-1), ... + 'Position',[135 5 sz(2)-135-5 20]); + h.slid(2) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',shortRange, 'Min',1, 'Max',30, 'SliderStep',[1 5]./(30-1), ... + 'Position',[135 30 sz(2)-135-5 20]); + h.slid(3) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',contrst, 'Min',0, 'Max',100, 'SliderStep',[1 10]./(100-0), ... + 'Position',[135 55 sz(2)-135-5 20]); + + % hook event handlers, and trigger default start + set(h.slid, 'Callback',{@onChange,h}, ... 
+ 'Interruptible','off', 'BusyAction','cancel'); + onChange([],[],h); +end diff --git a/opencv_contrib/src/+cv/BrightEdges.cpp b/opencv_contrib/src/+cv/BrightEdges.cpp new file mode 100644 index 000000000..746a08769 --- /dev/null +++ b/opencv_contrib/src/+cv/BrightEdges.cpp @@ -0,0 +1,50 @@ +/** + * @file BrightEdges.cpp + * @brief mex interface for cv::ximgproc::BrightEdges + * @ingroup ximgproc + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/ximgproc.hpp" +using namespace std; +using namespace cv; +using namespace cv::ximgproc; + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=1 && (nrhs%2)==1 && nlhs<=1); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + + // Option processing + int contrast = 1; + int shortrange = 3; + int longrange = 9; + for (int i=1; i Date: Wed, 31 Jan 2018 17:16:58 +0200 Subject: [PATCH 23/36] ximgproc: new ContourFitting plus Fourier descriptors demo --- opencv_contrib/+cv/ContourFitting.m | 287 ++++++++++++++++++ .../samples/fourier_descriptors_demo_gui.m | 202 ++++++++++++ .../src/+cv/private/ContourFitting_.cpp | 240 +++++++++++++++ .../test/unit_tests/TestContourFitting.m | 113 +++++++ 4 files changed, 842 insertions(+) create mode 100644 opencv_contrib/+cv/ContourFitting.m create mode 100644 opencv_contrib/samples/fourier_descriptors_demo_gui.m create mode 100644 opencv_contrib/src/+cv/private/ContourFitting_.cpp create mode 100644 opencv_contrib/test/unit_tests/TestContourFitting.m diff --git a/opencv_contrib/+cv/ContourFitting.m b/opencv_contrib/+cv/ContourFitting.m new file mode 100644 index 000000000..96a9ba6ac --- /dev/null +++ b/opencv_contrib/+cv/ContourFitting.m @@ -0,0 +1,287 @@ +classdef ContourFitting < handle + %CONTOURFITTING Contour Fitting algorithm using Fourier descriptors + % + % Contour fitting matches two contours `z_a` and `z_b` minimizing distance + % `d(z_a, z_b) = sum_n (a_n - s * b_n * exp(j * (n * alpha + phi)))^2` + % where `a_n` and `b_n` are Fourier descriptors of `z_a` and `z_b` and `s` + % is a scaling factor and `phi` is angle rotation and `alpha` is starting + % point factor adjustement. + % + % ## References: + % [PersoonFu1977]: + % > E Persoon and King-Sun Fu. "Shape discrimination using fourier + % > descriptors". IEEE Transactions on Pattern Analysis and Machine + % > Intelligence, 7(3):170-179, 1977. + % + % [BergerRaghunathan1998]: + % > L Berger, V A Raghunathan, C Launay, D Ausserre, and Y Gallot. + % > "Coalescence in 2 dimensions: experiments on thin copolymer films and + % > numerical simulations". The European Physical Journal B - Condensed + % > Matter and Complex Systems, 2(1):93-99, 1998. + % + % See also: cv.ContourFitting.ContourFitting, cv.matchShapes, + % cv.ShapeContextDistanceExtractor + % + + properties (SetAccess = private) + % Object ID + id + end + + properties (Dependent) + % number of Fourier descriptors used in + % cv.ContourFitting.estimateTransformation equal to number of contour + % points after resampling. 
+ CtrSize + % number of Fourier descriptors used for optimal curve matching in + % cv.ContourFitting.estimateTransformation when using vector of points + FDSize + end + + %% ContourFitting + methods + function this = ContourFitting(varargin) + %CONTOURFITTING Create ContourFitting object + % + % obj = cv.ContourFitting() + % obj = cv.ContourFitting('OptionName',optionValue, ...) + % + % ## Options + % * __CtrSize__ number of contour points after resampling. + % default 1024 + % * __FDSize__ number of Fourier descriptors. default 16 + % + % See also: cv.ContourFitting.estimateTransformation + % + this.id = ContourFitting_(0, 'new', varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % obj.delete() + % + % See also: cv.ContourFitting + % + if isempty(this.id), return; end + ContourFitting_(this.id, 'delete'); + end + + function [alphaPhiST, d] = estimateTransformation(this, src, ref, varargin) + %ESTIMATETRANSFORMATION Fits two closed curves using Fourier descriptors + % + % [alphaPhiST, d] = obj.estimateTransformation(src, ref) + % [...] = obj.estimateTransformation(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __src__ Contour defining first shape (source), or Fourier + % descriptors if `FD` is true. + % * __ref__ Contour defining second shape (target), or Fourier + % descriptors if `FD` is true. + % + % ## Output + % * __alphaPhiST__ transformation as a 5-elements vector + % `[alpha, phi, s, Tx, Ty]`, where: + % * __alpha__ starting point factor adjustement + % * __phi__ angle rotation in radian + % * __s__ scaling factor + % * __Tx__, __Ty__ the translation + % * __d__ distance between `src` and `ref` after matching. + % + % ## Options + % * __FD__ If false then `src` and `ref` are contours, and + % if true `src` and `ref` are Fourier descriptors. default false + % + % When `FD` is false, it applies cv.ContourFitting.contourSampling + % and cv.ContourFitting.fourierDescriptor to compute Fourier + % descriptors. + % + % More details in [PersoonFu1977] and [BergerRaghunathan1998]. + % + % See also: cv.ContourFitting.transformFD, + % cv.ContourFitting.contourSampling, + % cv.ContourFitting.fourierDescriptor + % + [alphaPhiST, d] = ContourFitting_(this.id, 'estimateTransformation', src, ref, varargin{:}); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.ContourFitting.empty, cv.ContourFitting.load + % + ContourFitting_(this.id, 'clear'); + end + + function b = empty(this) + %EMPTY Checks if algorithm object is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the algorithm object is empty + % (e.g. in the very beginning or after unsuccessful read). + % + % See also: cv.ContourFitting.clear, cv.ContourFitting.load + % + b = ContourFitting_(this.id, 'empty'); + end + + function save(this, filename) + %SAVE Saves the algorithm parameters to a file + % + % obj.save(filename) + % + % ## Input + % * __filename__ Name of the file to save to. + % + % This method stores the algorithm parameters in the specified + % XML or YAML file. + % + % See also: cv.ContourFitting.load + % + ContourFitting_(this.id, 'save', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads algorithm from a file or a string + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % obj.load(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __fname__ Name of the file to read. 
+ % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __ObjName__ The optional name of the node to read (if empty, + % the first top-level node will be used). default empty + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % This method reads algorithm parameters from the specified XML or + % YAML file (either from disk or serialized string). The previous + % algorithm state is discarded. + % + % See also: cv.ContourFitting.save + % + ContourFitting_(this.id, 'load', fname_or_str, varargin{:}); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. + % + % See also: cv.ContourFitting.save, cv.ContourFitting.load + % + name = ContourFitting_(this.id, 'getDefaultName'); + end + end + + %% Getters/Setters + methods + function value = get.CtrSize(this) + value = ContourFitting_(this.id, 'get', 'CtrSize'); + end + function set.CtrSize(this, value) + ContourFitting_(this.id, 'set', 'CtrSize', value); + end + + function value = get.FDSize(this) + value = ContourFitting_(this.id, 'get', 'FDSize'); + end + function set.FDSize(this, value) + ContourFitting_(this.id, 'set', 'FDSize', value); + end + end + + %% Static functions + methods (Static) + function out = contourSampling(src, numElt) + %CONTOURSAMPLING Contour sampling + % + % out = cv.ContourFitting.contourSampling(src, numElt) + % + % ## Input + % * __src__ input contour, vector of 2D points stored in numeric + % array Nx2/Nx1x2/1xNx2 or cell array of 2-element vectors + % `{[x,y], ...}`. + % * __NumElt__ number of points in `out` contour. + % + % ## Output + % * __out__ output contour with `numElt` points. + % + % See also: cv.findContours, cv.approxPolyDP + % + out = ContourFitting_(0, 'contourSampling', src, numElt); + end + + function dst = fourierDescriptor(src, varargin) + %FOURIERDESCRIPTOR Fourier descriptors for planed closed curves + % + % dst = cv.ContourFitting.fourierDescriptor(src) + % dst = cv.ContourFitting.fourierDescriptor(src, 'OptionName',optionValue, ...) + % + % ## Input + % * __src__ input contour, vector of 2D points stored in numeric + % array Nx2/Nx1x2/1xNx2 or cell array of 2-element vectors + % `{[x,y], ...}`. + % + % ## Output + % * __dst__ 2-channel array of type `single` and length `NumElt`. + % + % ## Options + % * __NumElt__ number of rows in `dst` or cv.getOptimalDFTSize + % rows if `NumElt=-1`. default -1 + % * __NumFD__ number of FD to return in `dst`, + % `dst = [FD(1...NumFD/2) FD(NumFD/2-NumElt+1...:NumElt)]`. + % default -1 (return all of FD as is). + % + % For more details about this implementation, please see + % [PersoonFu1977]. + % + % See also: cv.dft + % + dst = ContourFitting_(0, 'fourierDescriptor', src, varargin{:}); + end + + function dst = transformFD(src, t, varargin) + %TRANSFORMFD Transform a contour + % + % dst = cv.ContourFitting.transformFD(src, t) + % dst = cv.ContourFitting.transformFD(src, t, 'OptionName',optionValue, ...) + % + % ## Input + % * __src__ contour, or Fourier descriptors if `FD` is true. + % * __t__ 1x5 transform matrix given by + % cv.ContourFitting.estimateTransformation method. + % + % ## Output + % * __dst__ 2-channel matrix of type `double` and `NumElt` rows. 
+ % + % ## Options + % * __FD__ if true `src` are Fourier descriptors, if false `src` + % is a contour. default true + % + % See also: cv.ContourFitting.estimateTransformation, + % cv.ContourFitting.contourSampling, + % cv.ContourFitting.fourierDescriptor + % + dst = ContourFitting_(0, 'transformFD', src, t, varargin{:}); + end + end + +end diff --git a/opencv_contrib/samples/fourier_descriptors_demo_gui.m b/opencv_contrib/samples/fourier_descriptors_demo_gui.m new file mode 100644 index 000000000..c50fca4d3 --- /dev/null +++ b/opencv_contrib/samples/fourier_descriptors_demo_gui.m @@ -0,0 +1,202 @@ +%% Fourier Descriptors Demo +% Demostrates using Fourier descriptors for contour matching. +% +% Sources +% +% * +% + +function varargout = fourier_descriptors_demo_gui() + % create the UI + h = buildGUI(); + if nargout > 0, varargout{1} = h; end +end + +function out = noisyPolygon(pts, noise) + if noise == 0 + out = pts; + return; + end + try + % we want reproducible random numbers + rng('default') + end + pts = pts + (rand(size(pts)) * 2*noise - noise); + out = pts(1,:); + for i=1:size(pts,1) + next = i + 1; + if next > size(pts,1), next = 1; end + u = pts(next,:) - pts(i,:); + d = norm(u); + a = atan2(u(2), u(1)); + step = max(d/noise, 1); + for j=1:step:d + pAct = u * j/d; + r = rand() * noise; + theta = a + rand()*2*pi; + pNew = r*[cos(theta), sin(theta)] + pAct + pts(i,:); + out(end+1,:) = pNew; + end + end + out = fix(out); +end + +function img = FDCurveMatching(p) + % reference shape with 5 vertices + ctr0 = [250 250; 400 250; 400 300; 250 300; 180 270]; + + % noisy shape, transformed (rotate and scale) + M = cv.getRotationMatrix2D([p.xg, p.yg], p.angle, 10/p.scale); + ctr1 = noisyPolygon(ctr0, p.levelNoise); + ctr1 = permute(cv.transform(permute(ctr1, [1 3 2]), M), [1 3 2]); + + % phase-shift (i.e same order just different starting point) + n = size(ctr1,1); + orig = fix(p.origin/100 * n); + ctr1 = circshift(ctr1, orig, 1); + + % estimate transformation + if true + obj = cv.ContourFitting('FDSize',16, 'CtrSize',256); + t = obj.estimateTransformation(ctr1, ctr0, 'FD',false); + else + % explicit contour sampling with 256 points + ctr0s = cv.ContourFitting.contourSampling(ctr0, 256); + ctr1s = cv.ContourFitting.contourSampling(ctr1, 256); + obj = cv.ContourFitting('FDSize',16); + t = obj.estimateTransformation(ctr1s, ctr0s, 'FD',false); + end + + % fix t values to same range as ours: origin in (0,1)*n, angle in (0,360) + if t(1) < 0, t(1) = 1 + t(1); end + if t(2) < 0, t(2) = 2*pi + t(2); end + fprintf('Transform: t=%s\n', mat2str(t,3)); + fprintf(' Origin = %f, expected %d (%d)\n', t(1)*n, orig, n); + fprintf(' Angle = %f, expected %d\n', t(2)*180/pi, p.angle); + fprintf(' Scale = %f, expected %g\n', t(3), p.scale/10); + + % apply estimated transformation to bring noisy shape to reference shape + ctr2 = cv.ContourFitting.transformFD(ctr1, t, 'FD',false); + ctr2 = cat(1, ctr2{:}); + + % draw the three contours + C = {ctr0, ctr1, ctr2}; + clr = [255 0 0; 0 255 0; 0 255 255]; + txt = {'reference', 'noisy', 'recovered'}; + + % output image size + rect = [0 0 500 500]; + if false + for i=1:numel(C) + rect = cv.Rect.union(rect, cv.boundingRect(C{i})); + end + end + + img = zeros([rect(3:4) 3], 'uint8'); + for i=1:numel(C) + % legend + img = cv.putText(img, txt{i}, [10 20*i], ... 
+ 'Color',round(clr(i,:)*0.8), 'FontScale',0.5); + % contour + img = cv.drawContours(img, C{i}, 'Color',clr(i,:)); + % starting point + img = cv.circle(img, C{i}(1,:), 5, 'Color',clr(i,:)); + end +end + +function onChange(~,~,h) + %ONCHANGE Event handler for UI controls + + % retrieve current values from UI controls + p = struct(); + p.levelNoise = round(get(h.slid(6), 'Value')); + p.angle = round(get(h.slid(5), 'Value')); + p.scale = round(get(h.slid(4), 'Value')); + p.origin = round(get(h.slid(3), 'Value')); + p.xg = round(get(h.slid(2), 'Value')); + p.yg = round(get(h.slid(1), 'Value')); + set(h.txt(1), 'String',sprintf('Yg: %d',p.yg)); + set(h.txt(2), 'String',sprintf('Xg: %d',p.xg)); + set(h.txt(3), 'String',sprintf('Origin%%: %d',p.origin)); + set(h.txt(4), 'String',sprintf('Scale: %d',p.scale)); + set(h.txt(5), 'String',sprintf('Angle: %d',p.angle)); + set(h.txt(6), 'String',sprintf('Noise: %d',p.levelNoise)); + + % perform contour matching using Fourier descriptors + img = FDCurveMatching(p); + + % show result + set(h.img, 'CData',img); + drawnow; +end + +function h = buildGUI() + %BUILDGUI Creates the UI + + % canvas + img = zeros([500 500 3], 'uint8'); + sz = size(img); + + % initial params + % (a 45 degree rotation centered at [250,250] with a scaling of 5/10) + p = struct(); + p.levelNoise = 6; + p.angle = 45; + p.scale = 5; + p.origin = 10; + p.xg = 250; + p.yg = 250; + + % build the user interface (no resizing to keep it simple) + h = struct(); + h.fig = figure('Name','FD Curve matching', ... + 'NumberTitle','off', 'Menubar','none', 'Resize','off', ... + 'Position',[200 200 sz(2) sz(1)+155-1]); + if ~mexopencv.isOctave() + %HACK: not implemented in Octave + movegui(h.fig, 'center'); + end + h.ax = axes('Parent',h.fig, 'Units','pixels', 'Position',[1 155 sz(2) sz(1)]); + if ~mexopencv.isOctave() + h.img = imshow(img, 'Parent',h.ax); + else + %HACK: https://savannah.gnu.org/bugs/index.php?45473 + axes(h.ax); + h.img = imshow(img); + end + h.txt(1) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 5 100 20], 'String',sprintf('Yg: %d',p.yg)); + h.txt(2) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 30 100 20], 'String',sprintf('Xg: %d',p.xg)); + h.txt(3) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 55 100 20], 'String',sprintf('Origin%%: %d',p.origin)); + h.txt(4) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 80 100 20], 'String',sprintf('Scale: %d',p.scale)); + h.txt(5) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 105 100 20], 'String',sprintf('Angle: %d',p.angle)); + h.txt(6) = uicontrol('Parent',h.fig, 'Style','text', 'FontSize',11, ... + 'Position',[5 130 100 20], 'String',sprintf('Noise: %d',p.levelNoise)); + h.slid(1) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',p.yg, 'Min',150, 'Max',350, 'SliderStep',[2 20]./(350-150), ... + 'Position',[105 5 sz(2)-105-5 20]); + h.slid(2) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',p.xg, 'Min',150, 'Max',350, 'SliderStep',[2 20]./(350-150), ... + 'Position',[105 30 sz(2)-105-5 20]); + h.slid(3) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',p.origin, 'Min',0, 'Max',100, 'SliderStep',[1 10]./(100-0), ... + 'Position',[105 55 sz(2)-105-5 20]); + h.slid(4) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',p.scale, 'Min',5, 'Max',50, 'SliderStep',[1 5]./(50-5), ... 
+ 'Position',[105 80 sz(2)-105-5 20]); + h.slid(5) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',p.angle, 'Min',0, 'Max',360, 'SliderStep',[2 20]./(360-0), ... + 'Position',[105 105 sz(2)-105-5 20]); + h.slid(6) = uicontrol('Parent',h.fig, 'Style','slider', ... + 'Value',p.levelNoise, 'Min',0, 'Max',20, 'SliderStep',[1 5]./(20-0), ... + 'Position',[105 130 sz(2)-105-5 20]); + + % hook event handlers, and trigger default start + opts = {'Interruptible','off', 'BusyAction','cancel'}; + set(h.slid, 'Callback',{@onChange,h}, opts{:}); + onChange([],[],h); +end diff --git a/opencv_contrib/src/+cv/private/ContourFitting_.cpp b/opencv_contrib/src/+cv/private/ContourFitting_.cpp new file mode 100644 index 000000000..c74fd59b5 --- /dev/null +++ b/opencv_contrib/src/+cv/private/ContourFitting_.cpp @@ -0,0 +1,240 @@ +/** + * @file ContourFitting_.cpp + * @brief mex interface for cv::ximgproc::ContourFitting + * @ingroup ximgproc + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/ximgproc.hpp" +using namespace std; +using namespace cv; +using namespace cv::ximgproc; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// Object container +map > obj_; +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=2 && nlhs<=2); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + int id = rhs[0].toInt(); + string method(rhs[1].toString()); + + // Constructor is called. 
Create a new object from argument + if (method == "new") { + nargchk(nrhs>=2 && (nrhs%2)==0 && nlhs<=1); + int ctr = 1024; + int fd = 16; + for (int i=2; i src(rhs[2].toVector()), out; + contourSampling(src, out, nbElt); + plhs[0] = MxArray(out); + } + else { + Mat src(rhs[2].toMat(CV_32F)), out; + bool cn1 = (src.channels() == 1); + if (cn1) src = src.reshape(2,0); + contourSampling(src, out, nbElt); + if (cn1) out = out.reshape(1,0); + plhs[0] = MxArray(out); + } + return; + } + else if (method == "fourierDescriptor") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs<=1); + int nbElt = -1; + int nbFD = -1; + for (int i=3; i src(rhs[2].toVector()); + fourierDescriptor(src, dst, nbElt, nbFD); + } + else { + Mat src(rhs[2].toMat(CV_32F).reshape(2,0)); + fourierDescriptor(src, dst, nbElt, nbFD); + } + plhs[0] = MxArray(dst); + return; + } + else if (method == "transformFD") { + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=1); + bool fdContour = true; + for (int i=4; i src(rhs[2].toVector()), dst; + transformFD(src, t, dst, fdContour); + plhs[0] = MxArray(dst); + } + else { + Mat src(rhs[2].toMat(CV_32F)), dst; + bool cn1 = (src.channels() == 1); + if (cn1) src = src.reshape(2,0); + transformFD(src, t, dst, fdContour); + if (cn1) dst = dst.reshape(1,0); + plhs[0] = MxArray(dst); + } + return; + } + + // Big operation switch + Ptr obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==2 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==2 && nlhs==0); + obj->clear(); + } + else if (method == "load") { + nargchk(nrhs>=3 && (nrhs%2)==1 && nlhs==0); + string objname; + bool loadFromString = false; + for (int i=3; i(rhs[2].toString(), objname) : + Algorithm::load(rhs[2].toString(), objname)); + */ + ///* + // HACK: workaround for missing ContourFitting::create() + FileStorage fs(rhs[2].toString(), FileStorage::READ + + (loadFromString ? FileStorage::MEMORY : 0)); + if (!fs.isOpened()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + FileNode fn(objname.empty() ? 
fs.getFirstTopLevelNode() : fs[objname]); + if (fn.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to get node"); + obj->read(fn); + //*/ + } + else if (method == "save") { + nargchk(nrhs==3 && nlhs==0); + obj->save(rhs[2].toString()); + } + else if (method == "empty") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==2 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "estimateTransformation") { + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=2); + bool fdContour = false; + for (int i=4; i src(rhs[2].toVector()), + ref(rhs[3].toVector()); + obj->estimateTransformation(src, ref, alphaPhiST, dist, fdContour); + } + else { + Mat src(rhs[2].toMat(CV_32F).reshape(2,0)), + ref(rhs[3].toMat(CV_32F).reshape(2,0)); + obj->estimateTransformation(src, ref, alphaPhiST, dist, fdContour); + } + plhs[0] = MxArray(alphaPhiST); + if (nlhs > 1) + plhs[1] = MxArray(dist); + } + else if (method == "get") { + nargchk(nrhs==3 && nlhs<=1); + string prop(rhs[2].toString()); + if (prop == "CtrSize") + plhs[0] = MxArray(obj->getCtrSize()); + else if (prop == "FDSize") + plhs[0] = MxArray(obj->getFDSize()); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized property %s", prop.c_str()); + } + else if (method == "set") { + nargchk(nrhs==4 && nlhs==0); + string prop(rhs[2].toString()); + if (prop == "CtrSize") + obj->setCtrSize(rhs[3].toInt()); + else if (prop == "FDSize") + obj->setFDSize(rhs[3].toInt()); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized property %s", prop.c_str()); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/test/unit_tests/TestContourFitting.m b/opencv_contrib/test/unit_tests/TestContourFitting.m new file mode 100644 index 000000000..cc4ad847f --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestContourFitting.m @@ -0,0 +1,113 @@ +classdef TestContourFitting + %TestContourFitting + + methods (Static) + function test_contourSampling + c = get_contour(); + + % cell array + c2 = cv.ContourFitting.contourSampling(c, 100); + validateattributes(c2, {'cell'}, {'numel',100}); + + % Nx2 + c = cat(1, c{:}); + c2 = cv.ContourFitting.contourSampling(c, 100); + validateattributes(c2, {'numeric'}, {'size',[100 2]}); + + % Nx1x2 + c = permute(c, [1 3 2]); + c2 = cv.ContourFitting.contourSampling(c, 100); + validateattributes(c2, {'numeric'}, {'size',[100 1 2]}); + end + + function test_fourierDescriptor + c = get_contour(); + + % cell array + fd = cv.ContourFitting.fourierDescriptor(c, 'NumFD',30); + validateattributes(fd, {'numeric'}, {}); + assert(length(fd)==30 && size(fd,3)==2); + + % Nx2 + c = cat(1, c{:}); + fd = cv.ContourFitting.fourierDescriptor(c, 'NumFD',30); + validateattributes(fd, {'numeric'}, {}); + assert(length(fd)==30 && size(fd,3)==2); + + % Nx1x2 + c = permute(c, [1 3 2]); + fd = cv.ContourFitting.fourierDescriptor(c, 'NumFD',30); + validateattributes(fd, {'numeric'}, {}); + assert(length(fd)==30 && size(fd,3)==2); + end + + function test_transformFD + c = get_contour(); + t = [0 pi/2 1 10 20]; % [alpha, phi, s, Tx, Ty] + + % cell array + c2 = cv.ContourFitting.transformFD(c, t, 'FD',false); + validateattributes(c2, {'cell'}, {'nonempty'}); + + % Nx2 + c = cat(1, c{:}); + c2 = cv.ContourFitting.transformFD(c, t, 'FD',false); + validateattributes(c2, {'cell'}, {'nonempty'}); + + % Nx1x2 + c = permute(c, [1 3 2]); + c2 = cv.ContourFitting.transformFD(c, t, 'FD',false); + 
validateattributes(c2, {'cell'}, {'nonempty'}); + end + + function test_transformFD_2 + c = get_contour(); + t = [0 pi/2 1 10 20]; % [alpha, phi, s, Tx, Ty] + + n = cv.getOptimalDFTSize(numel(c)); + cc = cv.ContourFitting.contourSampling(c, n); + fd = cv.ContourFitting.fourierDescriptor(cc); + + fd2 = cv.ContourFitting.transformFD(fd, t, 'FD',true); + validateattributes(fd2, {'numeric'}, {'nonempty'}); + assert(length(fd2)==length(fd) && size(fd2,3)==2); + end + + function test_estimateTransformation + c1 = get_contour(); + c2 = cellfun(@(pt) pt+[10 20], c1, 'UniformOutput',false); + + obj = cv.ContourFitting(); + [t, d] = obj.estimateTransformation(c1, c2, 'FD',false); + validateattributes(t, {'double'}, {'vector', 'numel',5}); + validateattributes(d, {'numeric'}, {'scalar', 'nonnegative'}); + end + + function test_estimateTransformation_2 + c1 = get_contour(); + c2 = cellfun(@(pt) pt+[10 20], c1, 'UniformOutput',false); + + n = cv.getOptimalDFTSize(numel(c1)); + cc1 = cv.ContourFitting.contourSampling(c1, n); + cc2 = cv.ContourFitting.contourSampling(c2, n); + fd1 = cv.ContourFitting.fourierDescriptor(cc1); + fd2 = cv.ContourFitting.fourierDescriptor(cc2); + fd1 = reshape(fd1, [], 1, 2); + fd2 = reshape(fd2, [], 1, 2); + + obj = cv.ContourFitting(); + [t, d] = obj.estimateTransformation(fd1, fd2, 'FD',true); + validateattributes(t, {'double'}, {'vector', 'numel',5}); + validateattributes(d, {'numeric'}, {'scalar', 'nonnegative'}); + end + end + +end + +function [c, img] = get_contour() + im = fullfile(mexopencv.root(),'test','shape06.png'); + img = cv.imread(im, 'Grayscale',true); + c = cv.findContours(img, 'Mode','List', 'Method','None'); + [~,idx] = max(cellfun(@numel,c)); + c = c{idx}; +end From 79d74a43191460a18abd498dbc79e8418cf121cc Mon Sep 17 00:00:00 2001 From: Amro Date: Sat, 17 Feb 2018 22:55:59 +0200 Subject: [PATCH 24/36] ximgproc: expose RFFeatureGetter::getFeatures Function computes features, can be used when training own model (not implemented in opencv). Also changed options struct field names to match the ones in Piotr's Structured Edge Detection Toolbox. 
(#389) --- opencv_contrib/+cv/StructuredEdgeDetection.m | 43 ++++++++++++++++--- .../+cv/private/StructuredEdgeDetection_.cpp | 25 ++++++++--- .../unit_tests/TestStructuredEdgeDetection.m | 36 +++++++++++++--- 3 files changed, 88 insertions(+), 16 deletions(-) diff --git a/opencv_contrib/+cv/StructuredEdgeDetection.m b/opencv_contrib/+cv/StructuredEdgeDetection.m index 86810a1cb..519e7b089 100644 --- a/opencv_contrib/+cv/StructuredEdgeDetection.m +++ b/opencv_contrib/+cv/StructuredEdgeDetection.m @@ -69,11 +69,11 @@ % % src: source image to extract features % % features: output n-channel floating-point feature matrix % % opts: struct of options - % gnrmRad = opts.gradientNormalizationRadius; - % gsmthRad = opts.gradientSmoothingRadius; - % shrink = opts.shrinkNumber; - % outNum = opts.numberOfOutputChannels; - % gradNum = opts.numberOfGradientOrientations; + % gnrmRad = opts.normRad; % gradientNormalizationRadius + % gsmthRad = opts.grdSmooth; % gradientSmoothingRadius + % shrink = opts.shrink; % shrinkNumber + % outNum = opts.nChns; % numberOfOutputChannels + % gradNum = opts.nOrients; % numberOfGradientOrientations % % nsize = [size(src,1) size(src,2)] ./ shrink; % features = zeros([nsize outNum], 'single'); @@ -259,4 +259,37 @@ function load(this, fname_or_str, varargin) end end + %% Static functions + methods (Static) + function features = getFeatures(src, opts) + %GETFEATURES Extracts features from image + % + % features = cv.StructuredEdgeDetection.getFeatures(src, opts) + % + % ## Input + % * __src__ source image to extract features (RGB float in [0;1]). + % * __opts__ a scalar struct of random forest options + % (feature params), with the following fields: + % * __normRad__ `gradientNormalizationRadius` gradient + % normalization radius. + % * __grdSmooth__ `gradientSmoothingRadius` radius for smoothing + % of gradients (using convolution with triangle filter). + % * __shrink__ `shrinkNumber` amount to shrink channels. + % * __nChns__ `numberOfOutputChannels` number of edge + % orientation bins for output. + % * __nOrients__ `numberOfGradientOrientations` number of + % orientations per gradient scale. + % + % ## Output + % * __features__ extracted features. + % + % Extracted features are appropriate for StructuredEdgeDetection + % training. 
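As a rough usage sketch of this static method (the option values simply mirror the unit test added later in this patch, and the image is one of the test images used elsewhere in the repository; neither is prescriptive):

    img = single(imread(fullfile(mexopencv.root(),'test','balloon.jpg'))) / 255;
    opts = struct('normRad',4, 'grdSmooth',0, 'shrink',2, 'nChns',13, 'nOrients',4);
    F = cv.StructuredEdgeDetection.getFeatures(img, opts);
    % F is a single-precision HxWxC array whose channel count C equals
    % opts.nChns, with the spatial size reduced roughly by opts.shrink

All five fields must be present in the opts struct, since the MEX layer reads each of them by name.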
+ % + % See also: cv.StructuredEdgeDetection.StructuredEdgeDetection + % + features = StructuredEdgeDetection_(0, 'getFeatures', src, opts); + end + end + end diff --git a/opencv_contrib/src/+cv/private/StructuredEdgeDetection_.cpp b/opencv_contrib/src/+cv/private/StructuredEdgeDetection_.cpp index 10fa5a384..628d32b50 100644 --- a/opencv_contrib/src/+cv/private/StructuredEdgeDetection_.cpp +++ b/opencv_contrib/src/+cv/private/StructuredEdgeDetection_.cpp @@ -46,11 +46,11 @@ class MatlabRFFeatureGetter : public cv::ximgproc::RFFeatureGetter { // create input to evaluate kernel function MxArray opts(MxArray::Struct()); - opts.set("gradientNormalizationRadius", gnrmRad); - opts.set("gradientSmoothingRadius", gsmthRad); - opts.set("shrinkNumber", shrink); - opts.set("numberOfOutputChannels", outNum); - opts.set("numberOfGradientOrientations", gradNum); + opts.set("normRad", gnrmRad); + opts.set("grdSmooth", gsmthRad); + opts.set("shrink", shrink); + opts.set("nChns", outNum); + opts.set("nOrients", gradNum); mxArray *lhs, *rhs[3]; rhs[0] = MxArray(fun_name); rhs[1] = MxArray(src); // CV_32FC3 @@ -131,6 +131,21 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) mexLock(); return; } + // static method call + else if (method == "getFeatures") { + nargchk(nrhs==4 && nlhs<=1); + Mat src(rhs[2].toMat(CV_32F)), + features; + int gnrmRad = rhs[3].at("normRad").toInt(), + gsmthRad = rhs[3].at("grdSmooth").toInt(), + shrink = rhs[3].at("shrink").toInt(), + outNum = rhs[3].at("nChns").toInt(), + gradNum = rhs[3].at("nOrients").toInt(); + createRFFeatureGetter()->getFeatures(src, features, + gnrmRad, gsmthRad, shrink, outNum, gradNum); + plhs[0] = MxArray(features); + return; + } // Big operation switch Ptr obj = obj_[id]; diff --git a/opencv_contrib/test/unit_tests/TestStructuredEdgeDetection.m b/opencv_contrib/test/unit_tests/TestStructuredEdgeDetection.m index edea07e41..b1bc8a787 100644 --- a/opencv_contrib/test/unit_tests/TestStructuredEdgeDetection.m +++ b/opencv_contrib/test/unit_tests/TestStructuredEdgeDetection.m @@ -27,9 +27,9 @@ end function test_custom_feat_extract - %TODO: custom feature extractor - if true - error('mexopencv:testskip', 'todo'); + % skip test if external M-file is not found on the path + if ~exist('myRFFeatureGetter.m', 'file') + error('mexopencv:testskip', 'undefined function'); end img = imread(TestStructuredEdgeDetection.im); @@ -40,14 +40,38 @@ 'myRFFeatureGetter'); E = pDollar.detectEdges(img); end + + function test_get_features + img = imread(TestStructuredEdgeDetection.im); + img = single(img) / 255.0; + + opts = struct(); + opts.normRad = 4; + opts.grdSmooth = 0; + opts.shrink = 2; + opts.nChns = 13; + opts.nOrients = 4; + + features = cv.StructuredEdgeDetection.getFeatures(img, opts); + validateattributes(features, {'numeric'}, {'real', 'ndims',3}); + sz = size(features); + %assert(sz(1) * opts.shrink == size(img,1)); + %assert(sz(2) * opts.shrink == size(img,2)); + assert(sz(3) == opts.nChns); + end end end function features = myRFFeatureGetter(src, opts) - nsize = [size(src,1) size(src,2)] ./ opts.shrinkNumber; - features = zeros([nsize opts.numberOfOutputChannels], 'single'); - %TODO: ... compute features + if false + nsize = fix([size(src,1) size(src,2)] ./ opts.shrink); + features = zeros([nsize opts.nChns], 'single'); + %TODO: ... 
compute features + else + % call opencv's implementation + features = cv.StructuredEdgeDetection.getFeatures(src, opts); + end end function fname = get_model_file() From 1fff1cc241e73cfd5485fe812b59b00b4eb5832f Mon Sep 17 00:00:00 2001 From: Amro Date: Fri, 2 Feb 2018 18:38:10 +0200 Subject: [PATCH 25/36] face: changes to FaceRecognizer serialization --- opencv_contrib/src/+cv/private/BasicFaceRecognizer_.cpp | 4 +++- opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/opencv_contrib/src/+cv/private/BasicFaceRecognizer_.cpp b/opencv_contrib/src/+cv/private/BasicFaceRecognizer_.cpp index 74840e059..d137fc946 100644 --- a/opencv_contrib/src/+cv/private/BasicFaceRecognizer_.cpp +++ b/opencv_contrib/src/+cv/private/BasicFaceRecognizer_.cpp @@ -137,7 +137,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) FileStorage fs(fname, FileStorage::READ + FileStorage::MEMORY); if (!fs.isOpened()) mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); - obj->read(fs.root()); //TODO: root or getFirstTopLevelNode ? + obj->read(fs.getFirstTopLevelNode()); } else obj->read(fname); @@ -150,7 +150,9 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) FileStorage fs(fname, FileStorage::WRITE + FileStorage::MEMORY); if (!fs.isOpened()) mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + fs << obj->getDefaultName() << "{"; obj->write(fs); + fs << "}"; plhs[0] = MxArray(fs.releaseAndGetString()); } else diff --git a/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp b/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp index 0d018cd29..8547470aa 100644 --- a/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp +++ b/opencv_contrib/src/+cv/private/LBPHFaceRecognizer_.cpp @@ -132,7 +132,7 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) FileStorage fs(fname, FileStorage::READ + FileStorage::MEMORY); if (!fs.isOpened()) mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); - obj->read(fs.root()); //TODO: root or getFirstTopLevelNode ? 
+ obj->read(fs.getFirstTopLevelNode()); } else obj->read(fname); @@ -145,7 +145,9 @@ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) FileStorage fs(fname, FileStorage::WRITE + FileStorage::MEMORY); if (!fs.isOpened()) mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + fs << obj->getDefaultName() << "{"; obj->write(fs); + fs << "}"; plhs[0] = MxArray(fs.releaseAndGetString()); } else From a2db87eee9e046b7ef0d087d808501fd2179f83e Mon Sep 17 00:00:00 2001 From: Amro Date: Fri, 2 Feb 2018 20:17:26 +0200 Subject: [PATCH 26/36] face: new Facemark and FacemarkKazemi - three facemark algorithms (AAM, LBF, Kazemi) - in addition to several demos --- .gitignore | 5 + opencv_contrib/+cv/Facemark.m | 655 ++++++++++++++++++ opencv_contrib/+cv/FacemarkKazemi.m | 291 ++++++++ opencv_contrib/samples/face_swapping_demo.m | 293 ++++++++ .../samples/facemark_aam_train_demo.m | 324 +++++++++ .../samples/facemark_kazemi_detect_img_demo.m | 105 +++ .../samples/facemark_kazemi_detect_vid_demo.m | 124 ++++ .../samples/facemark_kazemi_train2_demo.m | 175 +++++ .../facemark_kazemi_train_config_demo.m | 68 ++ .../samples/facemark_kazemi_train_demo.m | 161 +++++ .../samples/facemark_lbf_fitting_demo.m | 134 ++++ .../samples/facemark_lbf_train_demo.m | 169 +++++ .../src/+cv/private/FacemarkKazemi_.cpp | 235 +++++++ opencv_contrib/src/+cv/private/Facemark_.cpp | 500 +++++++++++++ opencv_contrib/test/unit_tests/TestFacemark.m | 203 ++++++ .../test/unit_tests/TestFacemarkKazemi.m | 118 ++++ test/facemark/annotations.txt | 2 + test/facemark/config.xml | 11 + test/facemark/david1.jpg | Bin 0 -> 19981 bytes test/facemark/david1.pts | 72 ++ test/facemark/david1.txt | 69 ++ test/facemark/david2.jpg | Bin 0 -> 21695 bytes test/facemark/david2.pts | 72 ++ test/facemark/david2.txt | 69 ++ test/facemark/images.txt | 2 + test/facemark/points.txt | 2 + 26 files changed, 3859 insertions(+) create mode 100644 opencv_contrib/+cv/Facemark.m create mode 100644 opencv_contrib/+cv/FacemarkKazemi.m create mode 100644 opencv_contrib/samples/face_swapping_demo.m create mode 100644 opencv_contrib/samples/facemark_aam_train_demo.m create mode 100644 opencv_contrib/samples/facemark_kazemi_detect_img_demo.m create mode 100644 opencv_contrib/samples/facemark_kazemi_detect_vid_demo.m create mode 100644 opencv_contrib/samples/facemark_kazemi_train2_demo.m create mode 100644 opencv_contrib/samples/facemark_kazemi_train_config_demo.m create mode 100644 opencv_contrib/samples/facemark_kazemi_train_demo.m create mode 100644 opencv_contrib/samples/facemark_lbf_fitting_demo.m create mode 100644 opencv_contrib/samples/facemark_lbf_train_demo.m create mode 100644 opencv_contrib/src/+cv/private/FacemarkKazemi_.cpp create mode 100644 opencv_contrib/src/+cv/private/Facemark_.cpp create mode 100644 opencv_contrib/test/unit_tests/TestFacemark.m create mode 100644 opencv_contrib/test/unit_tests/TestFacemarkKazemi.m create mode 100644 test/facemark/annotations.txt create mode 100644 test/facemark/config.xml create mode 100644 test/facemark/david1.jpg create mode 100644 test/facemark/david1.pts create mode 100644 test/facemark/david1.txt create mode 100644 test/facemark/david2.jpg create mode 100644 test/facemark/david2.pts create mode 100644 test/facemark/david2.txt create mode 100644 test/facemark/images.txt create mode 100644 test/facemark/points.txt diff --git a/.gitignore b/.gitignore index ae1be6458..f60c8ac41 100644 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,8 @@ octave-workspace /test/faceocc2.webm 
/test/dudek.webm /test/david.webm +/test/face_landmark_model.dat +/test/lbfmodel.yaml +/test/facemark/helen/ +/test/facemark/ibug/ +/test/facemark/lfpw/ diff --git a/opencv_contrib/+cv/Facemark.m b/opencv_contrib/+cv/Facemark.m new file mode 100644 index 000000000..ec75b7531 --- /dev/null +++ b/opencv_contrib/+cv/Facemark.m @@ -0,0 +1,655 @@ +classdef Facemark < handle + %FACEMARK Base class for all facemark models + % + % Facial landmark detection is a useful algorithm with many possible + % applications including expression transfer, virtual make-up, and facial + % puppetry. This class implements an API for facial landmark detector. + % Two kinds of algorithms are implemented including active appearance + % model [AAM] and regressed local binary features [LBF] able to work in + % real-time. + % + % All facemark models in OpenCV are derived from the abstract base class + % Facemark, which provides a unified access to all facemark algorithms in + % OpenCV. + % + % To utilize the Facemark API in your program, please take a look at the + % opencv tutorials, as well as mexopencv samples. + % + % ## Description + % Facemark is a base class which provides universal access to any specific + % facemark algorithm. Therefore, the users should declare a desired + % algorithm before they can use it in their application. + % + % The typical pipeline for facemark detection is listed as follows: + % + % - (Non-mandatory) Set a user defined face detection using + % cv.Facemark.setFaceDetector. The facemark algorithms are desgined + % to fit the facial points into a face. Therefore, the face information + % should be provided to the facemark algorithm. Some algorithms might + % provides a default face recognition function. However, the users might + % prefer to use their own face detector to obtains the best possible + % detection result. + % - (Non-mandatory) Training the model for a specific algorithm using + % cv.Facemark.training. In this case, the model should be + % automatically saved by the algorithm. If the user already have a + % trained model, then this part can be omitted. + % - Load the trained model using cv.Facemark.loadModel. + % - Perform the fitting via the cv.Facemark.fit. + % + % ### Example + % Here is an example of loading a pretrained model + % (you should provide full paths to files specified below): + % + % obj = cv.Facemark('LBF', 'CascadeFace','lbpcascade_frontalface.xml'); + % obj.loadModel('lbfmodel.yaml'); + % img = imread('lena.jpg'); + % faces = obj.getFaces(img); + % landmarks = obj.fit(img, faces); + % for i=1:numel(faces) + % img = cv.rectangle(img, faces{i}, 'Color','g'); + % img = cv.Facemark.drawFacemarks(img, landmarks{i}); + % end + % imshow(img) + % + % ### Example + % Here is an example of training a model: + % + % % filename to save the trained model + % obj = cv.Facemark('LBF', 'ModelFilename','ibug68.model'); + % obj.setFaceDetector('myFaceDetector'); + % + % % load the list of dataset: image paths and landmark file paths + % [imgFiles, ptsFiles] = cv.Facemark.loadDatasetList(... + % 'data/images_train.txt', 'data/points_train.txt'); + % + % % add training samples + % for i=1:numel(imgFiles) + % img = imread(imgFiles{i}); + % pts = cv.Facemark.loadFacePoints(ptsFiles{i}); + % obj.addTrainingSample(img, pts); + % end + % obj.training(); + % + % ## References + % [AAM]: + % > G. Tzimiropoulos and M. Pantic, "Optimization problems for fast AAM + % > fitting in-the-wild," ICCV 2013. + % + % [LBF]: + % > S. Ren, et al. 
, "Face alignment at 3000 fps via regressing local + % > binary features", CVPR 2014. + % + % See also: cv.FacemarkKazemi, cv.Facemark.Facemark + % + + properties (SetAccess = private) + % Object ID + id + % name of custom face detector function + % (due to an implementation detail, + % we must maintain its state and keep passing it on each call) + func + end + + %% Constructor/destructor + methods + function this = Facemark(ftype, varargin) + %FACEMARK Constructor + % + % obj = cv.Facemark(ftype) + % obj = cv.Facemark(ftype, 'OptionName',optionValue, ...) + % + % ## Input + % * __ftype__ Facemark algorithm, one of: + % * __LBF__ regressed local binary features (LBF). + % * __AAM__ active appearance model (AAM). + % + % ## Options for LBF + % * __ShapeOffset__ offset for the loaded face landmark points. + % default 0.0 + % * __CascadeFace__ filename of the face detector model. default '' + % * __Verbose__ show the training print-out. default true + % * __NLandmarks__ number of landmark points. default 68 + % * __InitShapeN__ multiplier for augment the training data. + % default 10 + % * __StagesN__ number of refinement stages. default 5 + % * __TreeN__ number of tree in the model for each landmark point + % refinement. default 6 + % * __TreeDepth__ the depth of decision tree, defines the size of + % feature. default 5 + % * __BaggingOverlap__ overlap ratio for training the LBF feature. + % default 0.4 + % * __ModelFilename__ filename where the trained model will be + % saved (Base64 encoded). default '' + % * __SaveModel__ flag to save the trained model or not. + % default true + % * __Seed__ seed for shuffling the training data. default 0 + % * __FeatsM__ default `[500,500,500,300,300,300,200,200,200,100]` + % * __RadiusM__ + % default `[0.3,0.2,0.15,0.12,0.10,0.10,0.08,0.06,0.06,0.05]` + % * __Pupils__ index of facemark points on pupils of left and + % right eye. default `{[36,37,38,39,40,41], [42,43,44,45,46,47]}` + % * __DetectROI__ default `[-1,-1,-1,-1]` + % + % ## Options for AAM + % * __ModelFilename__ filename where the trained model will be + % saved (Base64 encoded). default '' + % * __SaveModel__ flag to save the trained model or not. + % default true + % * __M__ default 200 + % * __N__ default 10 + % * __NIter__ default 50 + % * __Verbose__ show the training print-out. default true + % * __MaxM__ default 550 + % * __MaxN__ default 136 + % * __TextureMaxM__ default 145 + % * __Scales__ the scales considered to build the model. + % default `[1.0,]` + % + % See also: cv.Facemark.loadModel + % + this.func = ''; + this.id = Facemark_(0, this.func, 'new', ftype, varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % obj.delete() + % + % See also: cv.Facemark + % + if isempty(this.id), return; end + Facemark_(this.id, this.func, 'delete'); + end + end + + %% Facemark + methods + function success = addTrainingSample(this, img, landmarks) + %ADDTRAININGSAMPLE Add one training sample to the trainer + % + % success = obj.addTrainingSample(img, landmarks) + % + % ## Input + % * __img__ Input image. + % * __landmarks__ The ground-truth of facial landmarks points + % corresponds to the image `{[x,y], ...}`. + % + % ## Output + % * __success__ success flag. 
+ % + % In the case of LBF, this function internally calls the face + % detector function, so you will need to either pass the option + % `CascadeFace` to use the default detector, or set a custom + % function in cv.Facemark.setFaceDetector + % + % See also: cv.Facemark.training + % + success = Facemark_(this.id, this.func, 'addTrainingSample', img, landmarks); + end + + function training(this) + %TRAINING Trains a Facemark algorithm using the given dataset + % + % obj.training() + % + % Before the training process, training samples should be added to + % the trainer using cv.Facemark.addTrainingSample function. + % + % See also: cv.Facemark.addTrainingSample + % + Facemark_(this.id, this.func, 'training'); + end + + function loadModel(this, model) + %LOADMODEL A function to load the trained model before the fitting process + % + % obj.loadModel(model) + % + % ## Input + % * __model__ A string represent the filename of a trained model. + % + % See also: cv.Facemark.fit + % + Facemark_(this.id, this.func, 'loadModel', model); + end + + function [landmarks, success] = fit(this, img, faces, varargin) + %FIT Detect landmarks in faces + % + % [landmarks, success] = obj.fit(img, faces) + % [...] = obj.fit(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __img__ Input image. + % * __faces__ Detected faces `{[x,y,w,h], ...}`. + % + % ## Output + % * __landmarks__ The detected landmark points for each face + % `{{[x,y], ...}, ...}`. + % * __success__ success flag. + % + % ## Options for AAM + % * __Configs__ Optional runtime parameter for fitting process, + % only supported for AAM algorithm. A struct-array of the same + % length as `faces` with the following fields: + % * __R__ 2x2 rotation matrix. default `eye(2,'single')` + % * __t__ 2-elements translation vector. + % default `[size(img,2) size(img,1)] / 2` + % * __scale__ scaling factor. default 1.0 + % * __scaleIdx__ 0-based model scale index (into `Scales` vector + % from the constructor options). default 0 + % + % See also: cv.Facemark.loadModel + % + [landmarks, success] = Facemark_(this.id, this.func, 'fit', img, faces, varargin{:}); + end + + function success = setFaceDetector(this, detector) + %SETFACEDETECTOR Set a user-defined face detector for the Facemark algorithm + % + % success = obj.setFaceDetector(detector) + % + % ## Input + % * __detector__ The user-defined face detector, MATLAB function + % name. + % + % ## Output + % * __success__ success flag. + % + % The user-defined face detector should have the following + % signature: + % + % function faces = myFaceDetector(img) + % + % where `img` is the input image and `faces` is the output with + % the detected faces. + % + % Note that some algorithms might provide a default face detector + % (LBF does, but AAM does not). + % + % See also: cv.Facemark.getFaces + % + this.func = detector; + success = Facemark_(this.id, this.func, 'setFaceDetector', detector); + end + + function [faces, success] = getFaces(this, img) + %GETFACES Detect faces from a given image using default or user-defined face detector + % + % [faces, success] = obj.getFaces(img) + % + % ## Input + % * __img__ Input image. + % + % ## Output + % * __faces__ Output of the function which represent region of + % interest of the detected faces `{[x,y,w,h], ...}`. Each face + % is stored as rect. + % * __success__ success flag. 
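+ %
+ % ### Example
+ % A minimal sketch, assuming the object was constructed with the
+ % `CascadeFace` option (or a custom detector was set beforehand):
+ %
+ %     obj = cv.Facemark('LBF', 'CascadeFace','lbpcascade_frontalface.xml');
+ %     img = imread('lena.jpg');
+ %     faces = obj.getFaces(img);
+ %     for i=1:numel(faces)
+ %         img = cv.rectangle(img, faces{i}, 'Color',[0 255 0]);
+ %     end
+ %     imshow(img)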
+ % + % See also: cv.Facemark.setFaceDetector + % + [faces, success] = Facemark_(this.id, this.func, 'getFaces', img); + end + + function [items, success] = getData(this) + %GETDATA Get data from an algorithm + % + % [items, success] = obj.getData() + % + % ## Output + % * __items__ The obtained data, algorithm dependent. + % * __success__ success flag. + % + % Only for AAM algorithm, not used in LBF (returns empty data). + % + % See also: cv.Facemark.setFaceDetector + % + [items, success] = Facemark_(this.id, this.func, 'getData'); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.Facemark.empty, cv.Facemark.load + % + Facemark_(this.id, this.func, 'clear'); + end + + function b = empty(this) + %EMPTY Checks if detector object is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the detector object is empty (e.g in the + % very beginning or after unsuccessful read). + % + % See also: cv.Facemark.clear, cv.Facemark.load + % + b = Facemark_(this.id, this.func, 'empty'); + end + + function varargout = save(this, filename) + %SAVE Saves a Facemark and its model state + % + % obj.save(filename) + % str = obj.save(filename) + % + % ## Input + % * __filename__ The filename to store this Facemark to (XML/YAML). + % + % ## Output + % * __str__ optional output. If requested, the model is persisted + % to a string in memory instead of writing to disk. + % + % Saves this model to a given filename, either as XML or YAML. + % + % Saves the state of a model to the given filename. + % + % See also: cv.Facemark.load + % + [varargout{1:nargout}] = Facemark_(this.id, this.func, 'write', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads a Facemark and its model state + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % + % ## Input + % * __fname__ The filename to load this Facemark from (XML/YAML). + % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % Loads a persisted model and state from a given XML or YAML file. + % + % See also: cv.Facemark.save + % + Facemark_(this.id, this.func, 'read', fname_or_str, varargin{:}); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. + % + % See also: cv.Facemark.save, cv.Facemark.load + % + name = Facemark_(this.id, this.func, 'getDefaultName'); + end + end + + %% Static methods + methods (Static) + function [faces, success] = getFacesHAAR(img, faceCascadeName) + %GETFACESHAAR Default face detector + % + % [faces, success] = cv.Facemark.getFacesHAAR(img, faceCascadeName) + % + % ## Input + % * __img__ The input image to be processed. + % * __faceCascadeName__ Name of the XML file from which a trained + % cascade classifier is loaded. See cv.CascadeClassifier.load + % + % ## Output + % * __faces__ Output of the function which represent region of + % interest of the detected faces `{[x,y,w,h], ...}`. Each face + % is stored as rect. + % * __success__ success flag. + % + % This function is mainly utilized by the implementation of a + % Facemark algorithm. 
End users are advised to use function + % cv.Facemark.getFaces which can be manually defined and + % circumvented to the algorithm by cv.Facemark.setFaceDetector. + % + % ### Example + % + % faces = cv.Facemark.getFacesHAAR(img, 'haarcascade_frontalface_alt.xml'); + % for i=1:numel(faces) + % img = cv.rectangle(img, faces{i}, 'Color',[0 255 0]); + % end + % imshow(img) + % + % See also: cv.Facemark.setFaceDetector, cv.CascadeClassifier + % + [faces, success] = Facemark_(0, '', 'getFacesHAAR', img, faceCascadeName); + end + + function [imagesPaths, annotationsPaths, success] = loadDatasetList(imagesList, annotationsList) + %LOADDATASETLIST A utility to load list of paths to training images and annotation files + % + % [imagesPaths, annotationsPaths, success] = cv.Facemark.loadDatasetList(imagesList, annotationsList) + % + % ## Input + % * __imagesList__ The specified file contains paths to the + % training images. + % * __annotationsList__ The specified file contains paths to the + % training annotations. + % + % ## Output + % * __imagesPaths__ The loaded paths of training images. + % * __annotationsPaths__ The loaded paths of annotation files. + % * __success__ success flag. + % + % This format is utilized in most facial point annotation datasets + % (IBUG, HELEN, LPWF, etc.). There are two kinds of files provided + % in the dataset, images and their corresponding facial point data. + % The user provides the list of image files and annotations in two + % separate files. These files can be generated easily using `dir` + % or `ls` commands from the terminal. + % + % The contents of the list files follow a standard format with one + % path per line. + % + % Example contents of images list file: + % + % /path/to/image1.jpg + % /path/to/image2.jpg + % ... + % + % and contents of corresponding landmarks list file: + % + % /path/to/image1.pts + % /path/to/image2.pts + % ... + % + % where the format of |.pts| files is described in + % cv.Facemark.loadFacePoints function. + % + % See also: cv.Facemark.loadFacePoints, + % cv.Facemark.loadTrainingData1 + % + [imagesPaths, annotationsPaths, success] = Facemark_(0, '', 'loadDatasetList', imagesList, annotationsList); + end + + function [imagesPaths, landmarks, success] = loadTrainingData1(imagesList, annotationsList, varargin) + %LOADTRAININGDATA1 A utility to load facial landmark information from the dataset + % + % [imagesPaths, landmarks, success] = cv.Facemark.loadTrainingData1(imagesList, annotationsList) + % [...] = cv.Facemark.loadTrainingData1(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __imagesList__ A file contains the list of image filenames in + % the training dataset. + % * __annotationsList__ A file contains the list of filenames where + % the ground-truth landmarks points information are stored. The + % content in each file should follow the standard format (see + % cv.Facemark.loadFacePoints). + % + % ## Output + % * __imagesPaths__ A cell-aray where each element represent the + % filename of image in the dataset. Images are not loaded by + % default to save memory. + % * __landmarks__ The loaded landmark points for all training + % data `{{[x,y], ...}, ...}`. + % * __success__ success flag. + % + % ## Options + % * __Offset__ An offset value to adjust the loaded points. + % default 0.0 + % + % The same dataset format described in cv.Facemark.loadDatasetList + % function. However this function directly loads the annotation + % data instead of only returning their file paths. 
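+ %
+ % ### Example
+ % A minimal sketch of feeding a trainer from the loaded annotations,
+ % assuming `obj` is a cv.Facemark instance prepared for training and
+ % the list file names are placeholders:
+ %
+ %     [imgFiles, landmarks] = cv.Facemark.loadTrainingData1(...
+ %         'images_train.txt', 'annotations_train.txt');
+ %     for i=1:numel(imgFiles)
+ %         img = imread(imgFiles{i});
+ %         obj.addTrainingSample(img, landmarks{i});
+ %     end
+ %     obj.training();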
+ % + % See also: cv.Facemark.loadDatasetList + % + [imagesPaths, landmarks, success] = Facemark_(0, '', 'loadTrainingData1', imagesList, annotationsList, varargin{:}); + end + + function [imagesPaths, landmarks, success] = loadTrainingData2(filename, varargin) + %LOADTRAININGDATA2 A utility to load facial landmark dataset from a single file + % + % [imagesPaths, landmarks, success] = cv.Facemark.loadTrainingData2(filename) + % [...] = cv.Facemark.loadTrainingData2(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __filename__ The filename of a file that contains the dataset + % information. Each line contains the filename of an image + % followed by pairs of `x` and `y` values of facial landmarks + % points separated by a space. + % + % ## Output + % * __imagesPaths__ A cell-aray where each element represent the + % filename of image in the dataset. Images are not loaded by + % default to save memory. + % * __landmarks__ The loaded landmark points for all training + % data `{{[x,y], ...}, ...}`. + % * __success__ success flag. + % + % ## Options + % * __Delim__ Delimiter between each element, the default value is + % a whitespace `' '`. + % * __Offset__ An offset value to adjust the loaded points. + % default 0.0 + % + % This dataset format simply consists of a single file, where each + % line contains an image path followed by list of x/y coordinates + % of its corresponding annotation. Number of points of each sample + % is not included, and the code will load all value pairs until + % the end of line. + % + % Example of file format expected: + % + % /path/to/image1.jpg 336.820955 240.864510 334.238298 260.922709 ... + % /path/to/image2.jpg 376.158428 230.845712 376.736984 254.924635 ... + % ... + % + % See also: cv.Facemark.loadTrainingData3 + % + [imagesPaths, landmarks, success] = Facemark_(0, '', 'loadTrainingData2', filename, varargin{:}); + end + + function [imagesPaths, landmarks, success] = loadTrainingData3(filenames) + %LOADTRAININGDATA3 Extracts the data for training from .txt files which contains the corresponding image name and landmarks + % + % [imagesPaths, landmarks, success] = cv.Facemark.loadTrainingData3(filenames) + % + % ## Input + % * __filenames__ A cell-array of strings containing names of the + % text files. + % + % ## Output + % * __imagesPaths__ A cell-array of strings which stores the + % filenames of images whose landmarks are tracked. + % * __landmarks__ A cell-array of cell-array of points that would + % store shape or landmarks of all images `{{[x,y],...}, ...}`. + % * __success__ success flag. It returns true when it reads the + % data successfully and false otherwise. + % + % An alternative dataset format. Similar to the one described in + % cv.Facemark.loadTrainingData2, but with separate files for each + % sample. + % + % The training data consists of |.txt| files whose first line + % contains the image name, followed by the annotations. Example: + % + % /path/to/image1.jpg + % 565.86 , 758.98 + % 564.27 , 781.14 + % ... + % + % See also: cv.Facemark.loadTrainingData2 + % + [landmarks, imagesPaths, success] = Facemark_(0, '', 'loadTrainingData3', filenames); + end + + function [points, success] = loadFacePoints(filename) + %LOADFACEPOINTS A utility to load facial landmark information from a given file + % + % [points, success] = cv.Facemark.loadFacePoints(filename) + % + % ## Input + % * __filename__ The filename of `.pts` file which contains the + % facial landmarks data. 
+ % + % ## Output + % * __points__ The loaded facial landmark points `{[x,y], ...}`. + % * __success__ success flag. + % + % ## Options + % * __Offset__ An offset value to adjust the loaded points. + % default 0.0 + % + % The annotation file should follow the default format which is: + % + % version: 1 + % n_points: 68 + % { + % 212.716603 499.771793 + % 230.232816 566.290071 + % ... + % } + % + % where `n_points` is the number of points considered and each + % point is represented as its position in `x` and `y`. + % + % See also: cv.Facemark.loadDatasetList, + % cv.Facemark.loadTrainingData1 + % + [points, success] = Facemark_(0, '', 'loadFacePoints', filename); + end + + function img = drawFacemarks(img, points, varargin) + %DRAWFACEMARKS Utility to draw the detected facial landmark points + % + % img = cv.Facemark.drawFacemarks(img, points) + % img = cv.Facemark.drawFacemarks(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __img__ The input image to be processed. + % * __points__ Contains the data of points which will be drawn + % `{[x,y], ...}`. + % + % ## Output + % * __img__ Output image with drawn points. + % + % ## Options + % * __Color__ The color of points represented in BGR format, + % default `[255,0,0]`. + % + % See also: cv.circle, cv.Facemark.fit + % + img = Facemark_(0, '', 'drawFacemarks', img, points, varargin{:}); + end + end + +end diff --git a/opencv_contrib/+cv/FacemarkKazemi.m b/opencv_contrib/+cv/FacemarkKazemi.m new file mode 100644 index 000000000..9b0d8a79f --- /dev/null +++ b/opencv_contrib/+cv/FacemarkKazemi.m @@ -0,0 +1,291 @@ +classdef FacemarkKazemi < handle + %FACEMARKKAZEMI Face Alignment + % + % An implementation of state of the art face alignment technique proposed + % by [Kazemi2014]. + % + % The paper demonstrates how an ensemble of regression trees can be used + % to estimate the face's landmark positions directly from a sparse subset + % of pixel intensities, achieving super-realtime performance with + % high-quality predictions. + % + % ## Face Alignment + % Face alignment is a computer vision technology for identifying the + % geometric structure of human faces in digital images. Given the location + % and size of a face, it automatically determines the shape of the face + % components such as eyes, nose, and lips. + % + % A face alignment program typically operates by iteratively adjusting a + % deformable models, which encodes the prior knowledge of face shape or + % appearance, to take into account the low-level image evidences and find + % the face that is present in the image. + % + % ## References + % [Kazemi2014]: + % > Vahid Kazemi and Josephine Sullivan, "One Millisecond Face Alignment + % > with an Ensemble of Regression Trees", CVPR 2014. + % > [PDF](https://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Kazemi_One_Millisecond_Face_2014_CVPR_paper.pdf) + % + % See also: cv.Facemark, cv.FacemarkKazemi.FacemarkKazemi + % + + properties (SetAccess = private) + % Object ID + id + % name of custom face detector function + % (due to an implementation detail, + % we must maintain its state and keep passing it on each call) + func + end + + %% Constructor/destructor + methods + function this = FacemarkKazemi(varargin) + %FACEMARKKAZEMI Constructor + % + % obj = cv.FacemarkKazemi() + % obj = cv.FacemarkKazemi('OptionName',optionValue, ...) + % + % ## Options + % * __CascadeDepth__ depth of cascade used for training. + % default 15 + % * __TreeDepth__ max height of the regression tree built. 
+ % default 5 + % * __NumTreesPerCascadeLevel__ number of trees fit per cascade + % level. default 500 + % * __LearningRate__ learning rate in gradient boosting, also + % refered to as shrinkage. default 0.1 + % * __OversamplingAmount__ number of initializations used to + % create training samples. default 20 + % * __NumTestCoordinates__ number of test coordinates. default 500 + % * __Lambda__ value to calculate probability of closeness of two + % coordinates. default 0.1 + % * __NumTestSplits__ number of random test splits generated. + % default 20 + % * __ConfigFile__ name of the file containing the values of + % training parameters. default '' + % + % These variables are used for training data. They are initialised + % as described in the referenced research paper. + % + % See also: cv.FacemarkKazemi.loadModel + % + this.func = ''; + this.id = FacemarkKazemi_(0, this.func, 'new', varargin{:}); + end + + function delete(this) + %DELETE Destructor + % + % obj.delete() + % + % See also: cv.FacemarkKazemi + % + if isempty(this.id), return; end + FacemarkKazemi_(this.id, this.func, 'delete'); + end + end + + %% FacemarkKazemi + methods + function success = training(this, images, landmarks, configFile, scale, varargin) + %TRAINING Trains a facemark model + % + % success = obj.training(images, landmarks, configFile, scale) + % success = obj.training(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __images__ cell-array of images which are used in training + % samples. + % * __landmarks__ cell-array of cell-array of points which stores + % the landmarks detected in a particular image. + % * __configFile__ name of the file storing parameters for + % training the model. For an example, see the + % |facemark_kazemi_train_config_demo.m| sample. + % * __scale__ size to which all images and landmarks have to be + % scaled to `[w,h]`. + % + % ## Output + % * __success__ returns true if the model is trained properly or + % false if it is not trained. + % + % ## Options + % * __ModelFilename__ name of the trained model file that has to + % be saved. default 'face_landmarks.dat' + % + % Trains a facemark model using gradient boosting to get a cascade + % of regressors which can then be used to predict shape. + % + % See also: cv.FacemarkKazemi.loadModel + % + success = FacemarkKazemi_(this.id, this.func, 'training', images, landmarks, configFile, scale, varargin{:}); + end + + function loadModel(this, filename) + %LOADMODEL Load the trained model + % + % obj.loadModel(filename) + % + % ## Input + % * __filename__ A string which stores the name of the file in + % which trained model is stored. + % + % See also: cv.FacemarkKazemi.fit + % + FacemarkKazemi_(this.id, this.func, 'loadModel', filename); + end + + function [landmarks, success] = fit(this, img, faces, varargin) + %FIT Retrieves a centered and scaled face shape, according to the bounding rectangle + % + % [landmarks, success] = obj.fit(img, faces) + % [...] = obj.fit(..., 'OptionName',optionValue, ...) + % + % ## Input + % * __img__ Input image whose landmarks have to be found. + % * __faces__ Cell-array of bounding boxes of faces found in a + % given image `{[x,y,w,h], ...}`. + % + % ## Output + % * __landmarks__ Cell-array of cell-array of points which stores + % the landmarks of all the faces found in the image + % `{{[x,y], ...}, ...}`. + % * __success__ success flag. 
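+ %
+ % ### Example
+ % A minimal sketch, assuming a pre-trained model file is available
+ % (model and cascade file names are placeholders):
+ %
+ %     obj = cv.FacemarkKazemi();
+ %     obj.loadModel('face_landmark_model.dat');
+ %     img = imread('lena.jpg');
+ %     faces = cv.Facemark.getFacesHAAR(img, 'haarcascade_frontalface_alt.xml');
+ %     [landmarks, success] = obj.fit(img, faces);
+ %     if success
+ %         img = cv.Facemark.drawFacemarks(img, landmarks{1});
+ %     end
+ %     imshow(img)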
+ % + % See also: cv.FacemarkKazemi.loadModel + % + [landmarks, success] = FacemarkKazemi_(this.id, this.func, 'fit', img, faces, varargin{:}); + end + + function success = setFaceDetector(this, detector) + %SETFACEDETECTOR Set the custom face detector + % + % success = obj.setFaceDetector(detector) + % + % ## Input + % * __detector__ The user-defined face detector, MATLAB function + % name. + % + % ## Output + % * __success__ success flag. + % + % The user-defined face detector should have the following + % signature: + % + % function faces = myFaceDetector(img) + % + % where `img` is the input image and `faces` is the output with + % the detected faces. + % + % See also: cv.FacemarkKazemi.getFaces + % + this.func = detector; + success = FacemarkKazemi_(this.id, this.func, 'setFaceDetector', detector); + end + + function [faces, success] = getFaces(this, img) + %GETFACES Detect faces using the custom detector + % + % [faces, success] = obj.getFaces(img) + % + % ## Input + % * __img__ Input image. + % + % ## Output + % * __faces__ Detected faces `{[x,y,w,h], ...}`. Each face is + % stored as rect. + % * __success__ success flag. + % + % See also: cv.FacemarkKazemi.setFaceDetector + % + [faces, success] = FacemarkKazemi_(this.id, this.func, 'getFaces', img); + end + end + + %% Algorithm + methods (Hidden) + function clear(this) + %CLEAR Clears the algorithm state + % + % obj.clear() + % + % See also: cv.FacemarkKazemi.empty, cv.FacemarkKazemi.load + % + FacemarkKazemi_(this.id, this.func, 'clear'); + end + + function b = empty(this) + %EMPTY Checks if detector object is empty + % + % b = obj.empty() + % + % ## Output + % * __b__ Returns true if the detector object is empty (e.g in the + % very beginning or after unsuccessful read). + % + % See also: cv.FacemarkKazemi.clear, cv.FacemarkKazemi.load + % + b = FacemarkKazemi_(this.id, this.func, 'empty'); + end + + function varargout = save(this, filename) + %SAVE Saves a Facemark and its model state + % + % obj.save(filename) + % str = obj.save(filename) + % + % ## Input + % * __filename__ The filename to store this Facemark to (XML/YAML). + % + % ## Output + % * __str__ optional output. If requested, the model is persisted + % to a string in memory instead of writing to disk. + % + % Saves this model to a given filename, either as XML or YAML. + % + % Saves the state of a model to the given filename. + % + % See also: cv.FacemarkKazemi.load + % + [varargout{1:nargout}] = FacemarkKazemi_(this.id, this.func, 'write', filename); + end + + function load(this, fname_or_str, varargin) + %LOAD Loads a Facemark and its model state + % + % obj.load(fname) + % obj.load(str, 'FromString',true) + % + % ## Input + % * __fname__ The filename to load this Facemark from (XML/YAML). + % * __str__ String containing the serialized model you want to + % load. + % + % ## Options + % * __FromString__ Logical flag to indicate whether the input is a + % filename or a string containing the serialized model. + % default false + % + % Loads a persisted model and state from a given XML or YAML file. + % + % See also: cv.FacemarkKazemi.save + % + FacemarkKazemi_(this.id, this.func, 'read', fname_or_str, varargin{:}); + end + + function name = getDefaultName(this) + %GETDEFAULTNAME Returns the algorithm string identifier + % + % name = obj.getDefaultName() + % + % ## Output + % * __name__ This string is used as top level XML/YML node tag + % when the object is saved to a file or string. 
+ % + % See also: cv.FacemarkKazemi.save, cv.FacemarkKazemi.load + % + name = FacemarkKazemi_(this.id, this.func, 'getDefaultName'); + end + end + +end diff --git a/opencv_contrib/samples/face_swapping_demo.m b/opencv_contrib/samples/face_swapping_demo.m new file mode 100644 index 000000000..6a16abe7e --- /dev/null +++ b/opencv_contrib/samples/face_swapping_demo.m @@ -0,0 +1,293 @@ +%% Face swapping using face landmark detection +% +% This demo lets you swap a face in one image with another face in another +% image. It first detects faces in both images and finds its landmarks. Then +% it swaps the face in first image with in another image. +% +% Sources: +% +% * +% + +%% Options + +% [INPUT] path to the first/second images in which you want to apply face swapping +im1 = fullfile(mexopencv.root(),'test','lena.jpg'); % source +im2 = which('kids.tif'); % destination + +% [INPUT] path to binary file storing the trained model to load +modelFile = fullfile(mexopencv.root(),'test','face_landmark_model.dat'); +if exist(modelFile, 'file') ~= 2 + % download model from GitHub + disp('Downloading model (~ 69MB)...') + url = 'https://cdn.rawgit.com/opencv/opencv_3rdparty/contrib_face_alignment_20170818/face_landmark_model.dat'; + urlwrite(url, modelFile); +end + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); +download_classifier_xml(xmlFace); + +% name of user-defined face detector function +faceDetectFcn = 'myFaceDetector'; +assert(exist([faceDetectFcn '.m'], 'file') == 2, 'missing face detect function'); + +%% Images +% load and show images +img1 = cv.imread(im1); +img2 = cv.imread(im2); +subplot(121), imshow(img1), title('source') +subplot(122), imshow(img2), title('destination') + +%% +% resized images as it is easier to process small images, +% resized according to their actual ratio +ratio1 = size(img1,2) / size(img1,1); +ratio2 = size(img2,2) / size(img2,1); +img1 = cv.resize(img1, fix(640 * [ratio1, ratio1])); +img2 = cv.resize(img2, fix(640 * [ratio2, ratio2])); + +%% Init +% create instance of the face landmark detection class, +% and set the face detector function, then load the pre-trained model +obj = cv.FacemarkKazemi(); +obj.setFaceDetector(faceDetectFcn); +obj.loadModel(modelFile); + +%% Detect +% detect faces in both images +faces1 = obj.getFaces(img1); +faces2 = obj.getFaces(img2); +assert(~isempty(faces1) && ~isempty(faces2), 'No faces found'); + +%% +% in case of multiple detections, take the biggest face in each image +if numel(faces1) > 1 + [~,ind] = max(cellfun(@cv.Rect.area, faces1)); + faces1 = faces1(ind); +end +if numel(faces2) > 1 + [~,ind] = max(cellfun(@cv.Rect.area, faces2)); + faces2 = faces2(ind); +end + +%% +% detect landmarks in both images +shapes1 = obj.fit(img1, faces1); +shapes2 = obj.fit(img2, faces2); +assert(~isempty(shapes1) && ~isempty(shapes2), 'No landmarks found'); +pts1 = shapes1{1}; +pts2 = shapes2{1}; + +%% +% show landmarks +figure +subplot(121), imshow(drawLandmarks(img1, pts1)), title('source') +subplot(122), imshow(drawLandmarks(img2, pts2)), title('destination') + +%% Swap +% First compute convex hull to find the boundary points of the face in the +% image which has to be swapped. +% +% Next as we need to warp one face over the other, we need to find affine +% transform. To find affine transform in OpenCV, it requires three set of +% points to calculate the affine matrix. Also we just need to warp the face +% instead of the surrounding regions. 
Hence we divide the face into triangles +% so that each triangle can be easily warped onto the other image. +% +% The function |divideIntoTriangles| divides the detected faces into triangles. +% The function |warpTriangle| then warps each triangle of one image to other +% image to swap the faces. + +%% +% compute convex hull +indices = cv.convexHull(pts2, 'ReturnPoints',false); +pts1 = pts1(indices + 1); +pts2 = pts2(indices + 1); + +%% +% Triangulation for points on the convex hull +rect = [0, 0, size(img2,2), size(img2,1)]; +triangles = divideIntoTriangles(rect, pts2); + +%% +% Apply affine transformation to Delaunay triangles +img1 = single(img1); +img1Warped = single(img2); +for i=1:numel(triangles) + % Get matching triangles points in img1 and img2 + tr1 = pts1(triangles{i} + 1); + tr2 = pts2(triangles{i} + 1); + % warp tr1 in img1 into tr2 in img2 + img1Warped = warpTriangle(img1, img1Warped, tr1, tr2); +end +img1Warped = uint8(img1Warped); + +%% +% show result +figure, imshow(img1Warped) + +%% Seamless cloning +% Even after warping, the results somehow look unnatural. Hence to improve the +% results we apply seamless cloning to get the desired results as required. + +%% +% create mask from convex hull +mask = zeros(rect(4), rect(3), 'uint8'); +mask = cv.fillConvexPoly(mask, pts2, 'Color',255); + +%% +% Clone seamlessly +r = cv.boundingRect(pts2); +center = r(1:2) + r(3:4)/2; +img1Warped = cv.seamlessClone(img1Warped, img2, mask, center, 'Method','NormalClone'); + +%% +% show result +figure, imshow(img1Warped) + +%% Helper function + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +function img = drawLandmarks(img, pts, varargin) + %DRAWLANDMARKS Draw facial landmark points + % + % img = drawLandmarks(img, pts) + % img = drawLandmarks(img, pts, 'OptionName',optionValue, ...) + % + % ## Input + % * __img__ input image + % * __pts__ face landmarks (68 points) + % + % ## Output + % * __img__ output image with drawn landmarks + % + % ## Options + % Optional drawing params passed to cv.polylines function (color, + % thickness, line type). + % + % The function assumes annotations following the Multi-PIE 68 points + % mark-up, as described in: + % [i-bug][https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/]. + % + % For reference, see: + % + % ![image][https://ibug.doc.ic.ac.uk/media/uploads/images/annotpics/figure_68_markup.jpg] + % + % See also: cv.Facemark.drawFacemarks + % + + % points + %{ + p1 = { + pts(1:17), ... % chin + pts(18:22), ... % left eyebrow + pts(23:27), ... % right eyebrow + pts(28:31) % nose top part + }; + p2 = { + pts(31:36), ... % nose bottom part + pts(37:42), ... % left eye + pts(43:48), ... % right eye + pts(49:60), ... 
% lips outer part + pts(61:68) % lips inside part + }; + %} + p1 = mat2cell(pts(1:31), 1, [17 5 5 4]); + p2 = mat2cell(pts(31:end), 1, [6 6 6 12 8]); + + % draw polylines (p1: not closed, p2: closed) + opts = {'Color',[0 255 0], 'Thickness',2, 'LineType','AA'}; + opts = [opts varargin]; + img = cv.polylines(img, p1, 'Closed',false, opts{:}); + img = cv.polylines(img, p2, 'Closed',true, opts{:}); +end + +function delaunayTri = divideIntoTriangles(rect, points) + %DIVIDEINTOTRIANGLES Divide the face into triangles for warping + + % Create an instance of Subdiv2D, insert points, and get triangles + subdiv = cv.Subdiv2D(rect); + subdiv.insert(points); + triangleList = subdiv.getTriangleList(); + + delaunayTri = {}; + for i=1:numel(triangleList) + % 3-points triangle + tr = triangleList{i}; + tr = {tr(1:2), tr(3:4), tr(5:6)}; + + % skip triangle if not all its points are within image ROI + if all(cellfun(@(pt) cv.Rect.contains(rect, pt), tr)) + % corresponding indices into convex hull points + [~,ind] = cv.batchDistance(cat(1,tr{:}), cat(1,points{:}), ... + 'NormType','L1', 'K',1); + delaunayTri{end+1} = ind; + end + end +end + +function img2 = warpTriangle(img1, img2, tr1, tr2) + %WARPTRIANGLE Warp triangle1 in img1 into corresponding triangle2 in img2 + + rect1 = cv.boundingRect(tr1); + rect2 = cv.boundingRect(tr2); + + % Offset points by left top corner of the respective rectangles + tr1Rect = cellfun(@(pt) pt - rect1(1:2), tr1, 'UniformOutput',false); + tr2Rect = cellfun(@(pt) pt - rect2(1:2), tr2, 'UniformOutput',false); + + % estimate transformation from source to destination triangles + warp_mat = cv.getAffineTransform(tr1Rect, tr2Rect); + + % Apply transformation to small rectangular patch + img1Rect = cv.Rect.crop(img1, rect1); + img2Rect = cv.warpAffine(img1Rect, warp_mat, 'DSize',rect2(3:4), ... + 'BorderType','Reflect101'); + + % Get mask by filling triangle + mask = zeros([rect2([4 3]) 3], 'single'); + mask = cv.fillConvexPoly(mask, tr2Rect, 'Color',[1 1 1], 'LineType','AA'); + + % cut out triangle and paste it on top of destination image + img2Rect = cv.multiply(img2Rect, mask); + img2 = cv.Rect.crop(img2, rect2, cv.multiply(cv.Rect.crop(img2, rect2), 1 - mask)); + img2 = cv.Rect.crop(img2, rect2, cv.Rect.crop(img2, rect2) + img2Rect); +end + +% The facemark API provides the functionality to the user to use their own +% face detector. The code below implements a sample face detector. This +% function must be saved in its own M-function to be used by the facemark API. +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(xmlFace); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... + 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/samples/facemark_aam_train_demo.m b/opencv_contrib/samples/facemark_aam_train_demo.m new file mode 100644 index 000000000..c1a342481 --- /dev/null +++ b/opencv_contrib/samples/facemark_aam_train_demo.m @@ -0,0 +1,324 @@ +%% Facemark AAM training demo +% +% The user should provides the list of training images accompanied by their +% corresponding landmarks location in separate files. +% +% See below for a description of file formats. +% +% Examples of datasets are available at +% . 
+% +% Sources: +% +% * +% * +% + +%% Preparation +% +% Before you continue with this tutorial, you should download a training +% dataset of facial landmarks detection. +% +% We suggest you to download the LFPW dataset which can be retrieved at +% . +% +% First thing to do is to make two text files containing the list of image +% files and annotation files respectively. Make sure that the order of images +% and annotations in both files are matched. Furthermore, it is advised to use +% absolute paths instead of relative paths. +% +% Example to make the file list in Linux machine: +% +% ls /data/lfpw/trainset/*.png > images_train.txt +% ls /data/lfpw/trainset/*.pts > annotations_train.txt +% +% Optionally, you can also create similar files list for the testset. +% +% Example of content in the |images_train.txt| file: +% +% /data/lfpw/trainset/image_0001.png +% /data/lfpw/trainset/image_0002.png +% /data/lfpw/trainset/image_0003.png +% ... +% +% Example of content in the |annotations_train.txt| file: +% +% /data/lfpw/trainset/image_0001.pts +% /data/lfpw/trainset/image_0002.pts +% /data/lfpw/trainset/image_0003.pts +% ... +% +% where a |.pts| file contains the position of each face landmark. +% Make sure that the annotation format is supported by the API, where the +% contents should look like the following snippet: +% +% version: 1 +% n_points: 68 +% { +% 212.716603 499.771793 +% 230.232816 566.290071 +% ... +% } +% +% Once trained, we show how to use the model to detect face landmarks in a +% test image. +% +% In this tutorial, the pre-trained model will not be provided due to its +% large file size (~500MB). By following this tutorial, you will be able to +% train and obtain your own trained model within few minutes. +% + +%% Options + +% [INPUT] path of a text file contains the list of paths to all training images +imgList = fullfile(mexopencv.root(),'test','facemark','lfpw','images.lst'); +assert(exist(imgList, 'file') == 2, 'missing images list file'); + +% [INPUT] path of a text file contains the list of paths to all annotations files +ptsList = fullfile(mexopencv.root(),'test','facemark','lfpw','annotations.lst'); +assert(exist(ptsList, 'file') == 2, 'missing annotations list file'); + +% [OUTPUT] path for saving the trained model +modelFile = fullfile(tempdir(), 'model_aam.yaml'); + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','haarcascade_frontalface_alt.xml'); +download_classifier_xml(xmlFace); + +% [INPUT] path to the cascade xml file for the eyes detector +xmlEyes = fullfile(mexopencv.root(),'test','haarcascade_eye_tree_eyeglasses.xml'); +download_classifier_xml(xmlEyes); + +% path to test image +testImg = fullfile(mexopencv.root(),'test','lena.jpg'); + +%% Init +% create the facemark instance +scales = [2.0, 4.0]; +obj = cv.Facemark('AAM', 'Scales',scales, ... + 'ModelFilename',modelFile, 'SaveModel',true, 'Verbose',true); + +%% +% In this case, we modified the default list of the scaling factor. +% By default, the scaling factor used is 1.0 (no scaling). Here we add two +% more scaling factor which will make the instance trains two more model at +% scale 2 and 4 (2 times smaller and 4 times smaller, with faster fitting +% time). However, you should make sure that this scaling factor is not too +% big since it will make the image scaled into a very small one. Thus it will +% lose all of its important information for the landmark detection purpose. 
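+
+%%
+% The image/annotation list files referenced in the Options above can also be
+% generated directly from MATLAB instead of the shell commands shown in the
+% Preparation section. This is only a sketch; the dataset folder below is a
+% placeholder:
+%
+%    d = '/data/lfpw/trainset';
+%    imgs = dir(fullfile(d, '*.png'));
+%    fid = fopen('images_train.txt', 'w');
+%    for i=1:numel(imgs)
+%        fprintf(fid, '%s\n', fullfile(d, imgs(i).name));
+%    end
+%    fclose(fid);
+%    % repeat with '*.pts' to generate the annotations list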
+ +%% Data +% load the dataset, and add training samples one-by-one +disp('Loading data...') +[imgFiles, ptsFiles] = cv.Facemark.loadDatasetList(imgList, ptsList); +for i=1:numel(imgFiles) + % load image and its corresponding annotation data, then add pair + img = cv.imread(imgFiles{i}); + pts = cv.Facemark.loadFacePoints(ptsFiles{i}); + obj.addTrainingSample(img, pts); +end + +%% Train +% train the algorithm, model will be saved to specified file +disp('Training...') +tic +obj.training(); +toc + +%% Prepare for Test +% Since the AAM algorithm needs initialization parameters (rotation, +% translation, and scaling), we need to declare the required variable to store +% these information which will be obtained using a custom function. The +% implementation of |getInitialFitting| function in this example is not +% optimal, you can always create your own function. +% +% The initialization is obtained by comparing the base shape of the trained +% model with the current face image. In this case, the rotation is obtained by +% comparing the angle of line formed by two eyes in the input face image with +% the same line in the base shape. Meanwhile, the scaling is obtained by +% comparing the length of line between eyes in the input image compared to the +% base shape. +% +% The fitting process starts by detecting faces in given image. +% +% If at least one face is found, then the next step is computing the +% initialization parameters. In this case, since |getInitialFitting| function +% is not optimal, it may not find pair of eyes from a given face. Therefore, +% we will filter out faces without initialization parameters and in this case, +% each element in the |confs| vector represent the initialization parameters +% for each filtered face. + +%% +% create cascade detector objects (for face and eyes) +ccFace = cv.CascadeClassifier(xmlFace); +ccEyes = cv.CascadeClassifier(xmlEyes); + +%% +% detect faces +img = cv.imread(testImg); +faces = myFaceDetector(img, ccFace); +assert(~isempty(faces), 'no faces found'); +fprintf('%d faces\n', numel(faces)); + +%% +% get base shape from trained model +s0 = obj.getData(); +s0 = cat(1, s0{:}); + +%% +% compute initialization params for each detected face +S = struct('R',eye(2), 't',[0 0], 'scale',1); +confs = S([]); +faces_eyes = {}; +for i=1:numel(faces) + [conf, found] = getInitialFitting(img, faces{i}, s0, ccEyes); + if found + confs(end+1) = conf; + faces_eyes{end+1} = faces{i}; + end +end +assert(~isempty(confs), 'failed to compute initialization params'); +fprintf('%d faces with eyes\n', numel(confs)); + +%% +% For the fitting parameters stored in the |confs| vector, |scaleIdx| field +% represents the ID of scaling factor that will be used in the fitting process. +% In this example the fitting will use the biggest scaling factor (4) which is +% expected to have the fastest computation time compared to the other scales. +% If the ID is bigger than the available trained scales in the model, the +% model with the biggest scale ID is used. +confs.scaleIdx = numel(scales) - 1; + +%% Test +% The fitting process is quite simple, you just need to pass the image, array +% of rectangles representing the ROIs of all faces in the given image, and the +% configuration params. It returns the landmark points. 
+tic +landmarks = obj.fit(img, faces_eyes, 'Configs',confs); +toc + +%% +% After the fitting process is finished, we can visualize the result +for i=1:numel(landmarks) + img = cv.Facemark.drawFacemarks(img, landmarks{i}, 'Color',[0 255 0]); +end +imshow(img) + +%% Helper functions + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +function faces = myFaceDetector(img, ccFace) + %MYFACEDETECTOR Detect faces + % + % faces = myFaceDetector(img, ccFace) + % + % ## Input + % * __img__ input image + % * __ccFace__ cascade object for face detection + % + % ## Output + % * __faces__ detected faces, `{[x,y,w,h], ...}` + % + % See also: cv.Facemark.getFacesHAAR + % + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = ccFace.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... + 'ScaleImage',true, 'MinSize',[30 30]); +end + +function [conf, found] = getInitialFitting(img, face, s0, ccEyes) + %GETINITIALFITTING Calculate AAM intial fit params + % + % [conf, found] = getInitialFitting(img, face, s0, ccEyes) + % + % ## Input + % * __img__ input image + % * __face__ detected face `[x,y,w,h]` + % * __s0__ base shape of the trained model + % * __ccEyes__ cascade object for eyes detection + % + % ## Output + % * __conf__ struct with rotation, translation, and scale + % * __found__ success flag + % + + found = false; + conf = struct('R',eye(2), 't',[0 0], 'scale',1.0); + + % detect eyes in face + if cv.Rect.area(face) == 0, return; end + faceROI = cv.Rect.crop(img, face); + eyes = ccEyes.detect(faceROI, 'ScaleFactor',1.1, 'MinNeighbors',2, ... 
+ 'ScaleImage',true, 'MinSize',[20 20]); + if numel(eyes) ~= 2, return; end + + % make sure that first is left eye, second is right eye + if eyes{2}(1) < eyes{1}(1) + eyes = eyes([2 1]); + end + + % eyes centers in detected face + c1 = face(1:2) + eyes{1}(1:2) + eyes{1}(3:4)/2; % left eye + c2 = face(1:2) + eyes{2}(1:2) + eyes{2}(3:4)/2; % right eye + assert(c1(1) < c2(1), 'eyes not ordered correctly (left then right)'); + + % eyes centers in base shape (shifted to middle of image) + base = bsxfun(@plus, s0, [size(img,2) size(img,1)]/2); + c1Base = (base(37,:) + base(40,:)) / 2; % left eye + c2Base = (base(43,:) + base(46,:)) / 2; % right eye + + % scale between the two line length in detected and base shape + scale = norm(c2 - c1) / norm(c2Base - c1Base); + + % eyes centers in scaled base shape (not shifted) + base = s0 * scale; + c1Base = (base(37,:) + base(40,:)) / 2; + c2Base = (base(43,:) + base(46,:)) / 2; + + % angle of horizontal line connecting eyes centers in scaled base shape + aBase = atan2(c2Base(2) - c1Base(2), c2Base(1) - c1Base(1)); + + % angle of horizontal line connecting eyes centers in detect face + a = atan2(c2(2) - c1(2), c2(1) - c1(1)); + + % rotation matrix from the two angles + R = cv.getRotationMatrix2D([0 0], rad2deg(aBase-a), 1.0); + R = R(1:2,1:2); + + % eyes centers in transformed base shape (scaled then rotated) + base = (R * scale * s0')'; + c1Base = (base(37,:) + base(40,:)) / 2; + c2Base = (base(43,:) + base(46,:)) / 2; + + % translation between detected and transformed base shape + t = c1 - c1Base; + + % fill output + found = true; + conf.R = R; + conf.t = t; + conf.scale = scale; +end diff --git a/opencv_contrib/samples/facemark_kazemi_detect_img_demo.m b/opencv_contrib/samples/facemark_kazemi_detect_img_demo.m new file mode 100644 index 000000000..21397a119 --- /dev/null +++ b/opencv_contrib/samples/facemark_kazemi_detect_img_demo.m @@ -0,0 +1,105 @@ +%% Face landmark detection in an image +% Face landmark detection in an image using ensemble of regression trees. +% +% This demo lets you detect landmarks of detected faces in an image. You can +% detect landmarks of all the faces found in an image and use them further in +% various applications like face swapping, face averaging etc. 
+% +% <> +% +% Sources: +% +% * +% * +% + +%% Options + +% [INPUT] path to input image +im = fullfile(mexopencv.root(),'test','lena.jpg'); + +% [INPUT] path to binary file storing the trained model to load +modelFile = fullfile(mexopencv.root(),'test','face_landmark_model.dat'); +if exist(modelFile, 'file') ~= 2 + % download model from GitHub + disp('Downloading model (~ 69MB)...') + url = 'https://cdn.rawgit.com/opencv/opencv_3rdparty/contrib_face_alignment_20170818/face_landmark_model.dat'; + urlwrite(url, modelFile); +end + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); +download_classifier_xml(xmlFace); + +% name of user-defined face detector function +faceDetectFcn = 'myFaceDetector'; +assert(exist([faceDetectFcn '.m'], 'file') == 2, 'missing face detect function'); + +% width/height to scale images, as larger images are slower to process +scale = [460 460]; + +%% Init +% create instance of the face landmark detection class, +% and set the face detector function, then load the pre-trained model +obj = cv.FacemarkKazemi(); +obj.setFaceDetector(faceDetectFcn); +obj.loadModel(modelFile); + +%% Image +% load image +img = cv.imread(im); +%img = cv.resize(img, scale); + +%% Detect +% detect faces in image, and get shapes of all detected faces, +% then draw bounding boxes around the faces and mark the landmarks +faces = obj.getFaces(img); +assert(~isempty(faces), 'No faces found'); +[shapes, success] = obj.fit(img, faces); +if success + img = cv.rectangle(img, faces, 'Color',[0 255 0]); + for i=1:numel(shapes) + img = cv.circle(img, shapes{i}, 3, 'Color',[0 0 255], 'Thickness','Filled'); + end +end +imshow(img) + +%% Helper function + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +% The facemark API provides the functionality to the user to use their own +% face detector. The code below implements a sample face detector. This +% function must be saved in its own M-function to be used by the facemark API. +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(xmlFace); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... + 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/samples/facemark_kazemi_detect_vid_demo.m b/opencv_contrib/samples/facemark_kazemi_detect_vid_demo.m new file mode 100644 index 000000000..7fccf83a5 --- /dev/null +++ b/opencv_contrib/samples/facemark_kazemi_detect_vid_demo.m @@ -0,0 +1,124 @@ +%% Face landmark detection in a video +% Face landmark detection in a video running at real time. +% +% This demos lets you detect landmarks of detected faces in a video. It first +% detects faces in a current video frame and then finds their facial landmarks. +% +% Example video: . 
+% +% Sources: +% +% * +% * +% + +%% Options + +% [INPUT] path to input video +vid = fullfile(mexopencv.root(),'test','dudek.webm'); + +% [INPUT] path to binary file storing the trained model to load +modelFile = fullfile(mexopencv.root(),'test','face_landmark_model.dat'); +if exist(modelFile, 'file') ~= 2 + % download model from GitHub + disp('Downloading model (~ 69MB)...') + url = 'https://cdn.rawgit.com/opencv/opencv_3rdparty/contrib_face_alignment_20170818/face_landmark_model.dat'; + urlwrite(url, modelFile); +end + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); +download_classifier_xml(xmlFace); + +% name of user-defined face detector function +faceDetectFcn = 'myFaceDetector'; +assert(exist([faceDetectFcn '.m'], 'file') == 2, 'missing face detect function'); + +% width/height to scale images, as larger images are slower to process +scale = [600 600]; + +%% Init +% create instance of the face landmark detection class, +% and set the face detector function, then load the pre-trained model +obj = cv.FacemarkKazemi(); +obj.setFaceDetector(faceDetectFcn); +obj.loadModel(modelFile); + +%% Video +% open video, and prepare figure +cap = cv.VideoCapture(vid); +assert(cap.isOpened(), 'Failed to load video'); +img = cap.read(); +assert(~isempty(img), 'Failed to read frame'); +hImg = imshow(img); + +%% Detect +% main loop: read each frame and detect faces and the landmarks corresponding +% to each shape detected, then display the current frame +while ishghandle(hImg) + % read frame + img = cap.read(); + if isempty(img), break; end + + % detect faces + %img = cv.resize(img, scale); + faces = obj.getFaces(img); + if ~isempty(faces) + % draw bounding box + img = cv.rectangle(img, faces, 'Color',[0 255 0]); + + % detect face landmarks + [shapes, success] = obj.fit(img, faces); + if success + % draw face landmarks + for i=1:numel(shapes) + img = cv.circle(img, shapes{i}, 3, 'Color',[0 0 255], 'Thickness','Filled'); + end + end + end + + % show frame + results + set(hImg, 'CData',img) + drawnow +end +cap.release(); + +%% Helper function + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +% The facemark API provides the functionality to the user to use their own +% face detector. The code below implements a sample face detector. This +% function must be saved in its own M-function to be used by the facemark API. +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(xmlFace); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... 
+ 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/samples/facemark_kazemi_train2_demo.m b/opencv_contrib/samples/facemark_kazemi_train2_demo.m new file mode 100644 index 000000000..a2f3a72f9 --- /dev/null +++ b/opencv_contrib/samples/facemark_kazemi_train2_demo.m @@ -0,0 +1,175 @@ +%% Training face landmark detector +% +% This demo helps to train your own face landmark detector. +% The user should provide the list of training images accompanied by their +% corresponding landmarks location in separated files. +% +% Examples of datasets are available at +% . +% +% We suggest you to download the HELEN dataset which can be retrieved at +% +% (Caution: The algorithm requires considerable RAM to train on this dataset). +% +% Example of contents for |images.txt| file: +% +% /data/helen/100032540_1.jpg +% /data/helen/100040721_1.jpg +% /data/helen/100040721_2.jpg +% ... +% +% Example of contents for |annotations.txt| file: +% +% /data/helen/100032540_1.pts +% /data/helen/100040721_1.pts +% /data/helen/100040721_2.pts +% ... +% +% where a |.pts| file contains the position of each face landmark. +% Example of contents for |.pts| files: +% +% version: 1 +% n_points: 68 +% { +% 212.716603 499.771793 +% 230.232816 566.290071 +% ... +% } +% +% For a description of training parameters used in |configFile|, see the demo +% |facemark_kazemi_train_config_demo.m|. +% +% <> +% +% You can also download a pre-trained model |face_landmark_model.dat|, +% see the demo |facemark_kazemi_detect_img_demo|. +% (that way you can skip training and simply load the model). +% +% Sources: +% +% * +% * +% + +%% Options + +% [INPUT] path of a text file contains the list of paths to all training images +imgList = fullfile(mexopencv.root(),'test','facemark','helen','images.lst'); +assert(exist(imgList, 'file') == 2, 'missing images list file'); + +% [INPUT] path of a text file contains the list of paths to all annotations files +ptsList = fullfile(mexopencv.root(),'test','facemark','helen','annotations.lst'); +assert(exist(ptsList, 'file') == 2, 'missing annotations list file'); + +% [INPUT] path to configuration xml file containing parameters for training +% https://github.com/opencv/opencv_contrib/raw/3.4.0/modules/face/samples/sample_config_file.xml +configFile = fullfile(mexopencv.root(),'test','facemark','config.xml'); +assert(exist(configFile, 'file') == 2, 'missing train config file'); + +% [OUTPUT] path for saving the trained model +modelFile = fullfile(tempdir(), 'model_kazemi.dat'); + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); +download_classifier_xml(xmlFace); + +% name of user-defined face detector function +faceDetectFcn = 'myFaceDetector'; +assert(exist([faceDetectFcn '.m'], 'file') == 2, 'missing face detect function'); + +% width/height which you want all images to get to scale the annotations. 
+% larger images are slower to process +scale = [460 460]; + +%% Data + +% load names of images and annotation files +disp('Loading data...') +[imgFiles, ptsFiles] = cv.Facemark.loadDatasetList(imgList, ptsList); + +% load images and their corresponding landmarks +imgs = cell(size(imgFiles)); +pts = cell(size(ptsFiles)); +for i=1:numel(imgFiles) + imgs{i} = cv.imread(imgFiles{i}); + pts{i} = cv.Facemark.loadFacePoints(ptsFiles{i}); +end + +%% Init +% create instance of the face landmark detection class, +% and set the face detector function +obj = cv.FacemarkKazemi('ConfigFile',configFile); +obj.setFaceDetector(faceDetectFcn); + +%% Train +% perform training +disp('Training...') +tic +success = obj.training(imgs, pts, configFile, scale, 'ModelFilename',modelFile); +toc +if success + disp('Training successful') +else + disp('Training failed') +end + +%% +% In the above call, |scale| is passed to scale all images and their +% corresponding landmarks, as it takes greater time to process large images. +% After scaling data it calculates mean shape of the data which is used as +% initial shape while training. It trains the model and stores the trained +% model file with the specified filename. As the training starts, you will see +% something like this: +% +% <> +% +% The error rate on trained images depends on the number of images used for +% training: +% +% <> +% +% The error rate on test images depends on the number of images used for +% training: +% +% <> +% + +%% Helper functions + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +% The facemark API provides the functionality to the user to use their own +% face detector. The code below implements a sample face detector. This +% function must be saved in its own M-function to be used by the facemark API. +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(xmlFace); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... + 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/samples/facemark_kazemi_train_config_demo.m b/opencv_contrib/samples/facemark_kazemi_train_config_demo.m new file mode 100644 index 000000000..7ade49ab5 --- /dev/null +++ b/opencv_contrib/samples/facemark_kazemi_train_config_demo.m @@ -0,0 +1,68 @@ +%% Parameters for training face landmark detector +% +% The configuration file mentioned below contains the training parameters +% which are required for training. Description of parameters is as follows: +% +% # *Cascade depth*: This stores the depth of cascade of regressors used for +% training. +% # *Tree depth*: This stores the depth of trees created as weak learners +% during gradient boosting. +% # *Number of trees per cascade level*: This stores number of trees required +% per cascade level. +% # *Learning rate*: This stores the learning rate for gradient boosting. 
This +% is required to prevent overfitting using shrinkage. +% # *Oversampling amount*: This stores the oversampling amount for the samples. +% # *Number of test coordinates*: This stores number of test coordinates to be +% generated as samples to decide for making the split. +% # *Lambda*: This stores the value used for calculating the probabilty which +% helps to select closer pixels for making the split. +% # *Number of test splits*: This stores the number of test splits to be +% generated before making the best split. +% +% To get more detailed description about the training parameters you can refer +% to the +% . +% +% These variables have been initialised below as defined in the research paper +% "One millisecond face alignment" CVPR 2014 +% +% Sources: +% +% * +% * +% * +% + +% [OUTPUT] path to file which you want to create as config file +configFile = fullfile(tempdir(), 'config_kazemi.xml'); + +S = struct(); + +% stores the depth of cascade of regressors used for training. +S.cascade_depth = uint32(15); + +% stores the depth of trees created as weak learners during gradient boosting. +S.tree_depth = uint32(4); + +% stores number of trees required per cascade level. +S.num_trees_per_cascade_level = uint32(500); + +% stores the learning rate for gradient boosting. +S.learning_rate = 0.1; + +% stores the oversampling amount for the samples. +S.oversampling_amount = uint32(20); + +% stores number of test coordinates required for making the split. +S.num_test_coordinates = uint32(400); + +% stores the value used for calculating the probabilty. +S.lambda = 0.1; + +% stores the number of test splits to be generated before making the best split. +S.num_test_splits = uint32(20); + +% write file +cv.FileStorage(configFile, S); +disp('Write Done') +type(configFile) diff --git a/opencv_contrib/samples/facemark_kazemi_train_demo.m b/opencv_contrib/samples/facemark_kazemi_train_demo.m new file mode 100644 index 000000000..941f2971a --- /dev/null +++ b/opencv_contrib/samples/facemark_kazemi_train_demo.m @@ -0,0 +1,161 @@ +%% Training face landmark detector +% +% This demo helps to train your own face landmark detector. You can train your +% own face landmark detection by just providing the paths for directory +% containing the images and files containing their corresponding face +% landmarks. As this landmark detector was originally trained on +% , the training +% follows the format of data provided in HELEN dataset. +% +% The dataset consists of |.txt| files whose first line contains the image +% name which then follows the annotations. The format of a file containing +% annotations should be the following: +% +% /data/helen/100032540_1.jpg +% 565.86 , 758.98 +% 564.27 , 781.14 +% ... +% +% The above format is similar to HELEN dataset which is used for training the +% model. +% +% For a description of training parameters used in |configFile|, see the demo +% |facemark_kazemi_train_config_demo.m|. +% +% <> +% +% You can also download a pre-trained model |face_landmark_model.dat|, +% see the demo |facemark_kazemi_detect_img_demo|. +% (that way you can skip training and simply load the model). 
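+%
+% In outline (a minimal sketch using the variables prepared in the Options
+% section below; note that HELEN annotation files list image basenames only,
+% so the image paths are rebuilt relative to the dataset directory):
+%
+%   files = cv.glob(fullfile(dname, '*.txt'));
+%   [names, pts] = cv.Facemark.loadTrainingData3(files);
+%   imgs = cellfun(@(f) cv.imread(fullfile(dname, [f '.jpg'])), ...
+%       names, 'UniformOutput',false);
+%   obj = cv.FacemarkKazemi('ConfigFile',configFile);
+%   obj.setFaceDetector(faceDetectFcn);
+%   ok = obj.training(imgs, pts, configFile, scale, 'ModelFilename',modelFile);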
+% +% Sources: +% +% * +% * +% + +%% Options + +% [INPUT] path to the directory containing all text and image files +dname = fullfile(mexopencv.root(),'test','facemark','helen'); +assert(isdir(dname), 'missing data directory'); + +% [INPUT] path to configuration xml file containing parameters for training +% https://github.com/opencv/opencv_contrib/raw/3.4.0/modules/face/samples/sample_config_file.xml +configFile = fullfile(mexopencv.root(),'test','facemark','config.xml'); +assert(exist(configFile, 'file') == 2, 'missing train config file'); + +% [OUTPUT] path for saving the trained model +modelFile = fullfile(tempdir(), 'model_kazemi.dat'); + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); +download_classifier_xml(xmlFace); + +% name of user-defined face detector function +faceDetectFcn = 'myFaceDetector'; +assert(exist([faceDetectFcn '.m'], 'file') == 2, 'missing face detect function'); + +% width/height which you want all images to get to scale the annotations. +% larger images are slower to process +scale = [460 460]; + +%% Data + +% get names of files in which annotations and image names are found +filenames = cv.glob(fullfile(dname, '*.txt')); + +% load image names and their corresponding landmarks +disp('Loading data...') +[imgFiles, pts] = cv.Facemark.loadTrainingData3(filenames); + +% load images +imgs = cell(size(imgFiles)); +for i=1:numel(imgFiles) + if true + % HELEN dataset annotations only store image basename + fname = fullfile(dname, [imgFiles{i} '.jpg']); + else + fname = imgFiles{i}; + end + imgs{i} = cv.imread(fname); +end + +%% Init +% create instance of the face landmark detection class, +% and set the face detector function +obj = cv.FacemarkKazemi('ConfigFile',configFile); +obj.setFaceDetector(faceDetectFcn); + +%% Train +% perform training +disp('Training...') +tic +success = obj.training(imgs, pts, configFile, scale, 'ModelFilename',modelFile); +toc +if success + disp('Training successful') +else + disp('Training failed') +end + +%% +% In the above call, |scale| is passed to scale all images and their +% corresponding landmarks, as it takes greater time to process large images. +% After scaling data it calculates mean shape of the data which is used as +% initial shape while training. It trains the model and stores the trained +% model file with the specified filename. As the training starts, you will see +% something like this: +% +% <> +% +% The error rate on trained images depends on the number of images used for +% training: +% +% <> +% +% The error rate on test images depends on the number of images used for +% training: +% +% <> +% + +%% Helper functions + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +% The facemark API provides the functionality to the user to use their own +% face detector. The code below implements a sample face detector. This +% function must be saved in its own M-function to be used by the facemark API. 
+function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(xmlFace); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... + 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/samples/facemark_lbf_fitting_demo.m b/opencv_contrib/samples/facemark_lbf_fitting_demo.m new file mode 100644 index 000000000..b0423ec81 --- /dev/null +++ b/opencv_contrib/samples/facemark_lbf_fitting_demo.m @@ -0,0 +1,134 @@ +%% Face landmark detection in a video (LBF) +% +% This demos lets you detect landmarks of detected faces in a video. It first +% detects faces in a current video frame and then finds their facial landmarks. +% +% Sources: +% +% * +% + +%% Options + +% [INPUT] path to input video +if true + vid = fullfile(mexopencv.root(),'test','dudek.webm'); +else + vid = 0; +end + +% [INPUT] path to the trained model to load +modelFile = fullfile(mexopencv.root(),'test','lbfmodel.yaml'); +if exist(modelFile, 'file') ~= 2 + % download model from GitHub + disp('Downloading model (~ 54MB)...') + url = 'https://github.com/kurnianggoro/GSOC2017/raw/master/data/lbfmodel.yaml'; + urlwrite(url, modelFile); +end + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); +download_classifier_xml(xmlFace); + +% name of user-defined face detector function +faceDetectFcn = 'myFaceDetector'; +assert(exist([faceDetectFcn '.m'], 'file') == 2, 'missing face detect function'); + +%% Init +% create instance of the face landmark detection class, +% and set the face detector function, then load the pre-trained model +if true + obj = cv.Facemark('LBF'); + obj.setFaceDetector(faceDetectFcn); +else + obj = cv.Facemark('LBF', 'CascadeFace',xmlFace); +end +obj.loadModel(modelFile); + +%% Video +% open video, and prepare figure +cap = cv.VideoCapture(vid); +assert(cap.isOpened(), 'Failed to load video'); +img = cap.read(); +assert(~isempty(img), 'Failed to read frame'); +hImg = imshow(img); + +%% Detect +% main loop +counter = 0; +tID = tic(); +while ishghandle(hImg) + % read frame + img = cap.read(); + if isempty(img), break; end + + % scale frame + scale = 400 / size(img,2); + imgS = cv.resize(img, fix(scale * [size(img,2) size(img,1)])); + + % detect faces + rects = obj.getFaces(imgS); + rects = cellfun(@(r) fix(r/scale), rects, 'Uniform',false); + + % detect and display face landmarks + if ~isempty(rects) + img = cv.rectangle(img, rects, 'Color',[0 255 0]); + landmarks = obj.fit(img, rects); + for i=1:numel(landmarks) + img = cv.Facemark.drawFacemarks(img, landmarks{i}, 'Color',[0 0 255]); + end + end + + % show FPS + counter = counter + 1; + fps = counter/toc(tID); + txt = sprintf('faces: %d, fps: %03.2f', numel(rects), fps); + img = cv.putText(img, txt, [20 40], ... + 'FontFace','HersheyPlain', 'FontScale',2, ... 
+ 'Thickness',2, 'Color',[255 255 255]); + + % show frame + results + set(hImg, 'CData',img) + drawnow +end +cap.release(); + +%% Helper function + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +% The facemark API provides the functionality to the user to use their own +% face detector. The code below implements a sample face detector. This +% function must be saved in its own M-function to be used by the facemark API. +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(xmlFace); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... + 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/samples/facemark_lbf_train_demo.m b/opencv_contrib/samples/facemark_lbf_train_demo.m new file mode 100644 index 000000000..1f2d4d5de --- /dev/null +++ b/opencv_contrib/samples/facemark_lbf_train_demo.m @@ -0,0 +1,169 @@ +%% Facemark LBF training demo +% +% The user should provides the list of training images accompanied by their +% corresponding landmarks location in separate files. +% +% See below for a description of file formats. +% +% Examples of datasets are available at +% . +% +% Sources: +% +% * +% * +% + +%% Preparation +% +% Before you continue with this tutorial, you should download a training +% dataset of facial landmarks detection. +% +% We suggest you to download the IBUG dataset which can be retrieved at +% +% +% First thing to do is to make two text files containing the list of image +% files and annotation files respectively. Make sure that the order of images +% and annotations in both files are matched. Furthermore, it is advised to use +% absolute paths instead of relative paths. +% +% Example to make the file list in Linux machine: +% +% ls /data/ibug/*.jpg > images.txt +% ls /data/ibug/*.pts > annotations.txt +% +% Example of content in the |images.txt| file: +% +% /data/ibug/image_003_1.jpg +% /data/ibug/image_004_1.jpg +% /data/ibug/image_005_1.jpg +% ... +% +% Example of content in the |annotations.txt| file: +% +% /data/ibug/image_003_1.pts +% /data/ibug/image_004_1.pts +% /data/ibug/image_005_1.pts +% ... +% +% where a |.pts| file contains the position of each face landmark. +% Make sure that the annotation format is supported by the API, where the +% contents should look like the following snippet: +% +% version: 1 +% n_points: 68 +% { +% 212.716603 499.771793 +% 230.232816 566.290071 +% ... +% } +% +% Once trained, we show how to use the model to detect face landmarks in a +% test image. +% +% You can also download a pre-trained model in this link +% +% (that way you can skip training and simply load the model). 
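+%
+% In short (a minimal sketch, assuming the image/annotation list files
+% prepared as above and the options defined in the next section; the full
+% demo below adds the model/cascade downloads and sanity checks):
+%
+%   obj = cv.Facemark('LBF', 'NLandmarks',68, 'CascadeFace',xmlFace, ...
+%       'ModelFilename',modelFile, 'SaveModel',true);
+%   obj.setFaceDetector(faceDetectFcn);
+%   [imgFiles, ptsFiles] = cv.Facemark.loadDatasetList(imgList, ptsList);
+%   for i=1:numel(imgFiles)
+%       img = cv.imread(imgFiles{i});
+%       pts = cv.Facemark.loadFacePoints(ptsFiles{i});
+%       obj.addTrainingSample(img, pts);
+%   end
+%   obj.training();   % trains and saves the model to modelFile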
+% + +%% Options + +% [INPUT] path of a text file contains the list of paths to all training images +imgList = fullfile(mexopencv.root(),'test','facemark','ibug','images.lst'); +assert(exist(imgList, 'file') == 2, 'missing images list file'); + +% [INPUT] path of a text file contains the list of paths to all annotations files +ptsList = fullfile(mexopencv.root(),'test','facemark','ibug','annotations.lst'); +assert(exist(ptsList, 'file') == 2, 'missing annotations list file'); + +% [OUTPUT] path for saving the trained model +modelFile = fullfile(tempdir(), 'model_lbf.yaml'); + +% [INPUT] path to the cascade xml file for the face detector +xmlFace = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); +download_classifier_xml(xmlFace); + +% name of user-defined face detector function +faceDetectFcn = 'myFaceDetector'; +assert(exist([faceDetectFcn '.m'], 'file') == 2, 'missing face detect function'); + +% path to test image +testImg = fullfile(mexopencv.root(),'test','lena.jpg'); + +%% Init +% create the facemark instance +obj = cv.Facemark('LBF', 'NLandmarks',68, 'CascadeFace',xmlFace, ... + 'ModelFilename',modelFile, 'SaveModel',true, 'Verbose',true); + +%% +% set user-defined face detector +obj.setFaceDetector(faceDetectFcn); + +%% Data +% load the dataset, and add training samples one-by-one +disp('Loading data...') +[imgFiles, ptsFiles] = cv.Facemark.loadDatasetList(imgList, ptsList); +for i=1:numel(imgFiles) + % load image and its corresponding annotation data, then add pair + img = cv.imread(imgFiles{i}); + pts = cv.Facemark.loadFacePoints(ptsFiles{i}); + obj.addTrainingSample(img, pts); +end + +%% Train +% train the algorithm, model will be saved to specified file +disp('Training...') +tic +obj.training(); +toc + +%% Test +% run on some test image +img = cv.imread(testImg); +faces = obj.getFaces(img); +landmarks = obj.fit(img, faces); +for i=1:numel(faces) + img = cv.rectangle(img, faces{i}, 'Color',[255 0 255]); + img = cv.Facemark.drawFacemarks(img, landmarks{i}, 'Color',[0 0 255]); +end +imshow(img) + +%% Helper functions + +function download_classifier_xml(fname) + if exist(fname, 'file') ~= 2 + % attempt to download trained Haar/LBP/HOG classifier from Github + url = 'https://cdn.rawgit.com/opencv/opencv/3.4.0/data/'; + [~, f, ext] = fileparts(fname); + if strncmpi(f, 'haarcascade_', length('haarcascade_')) + url = [url, 'haarcascades/']; + elseif strncmpi(f, 'lbpcascade_', length('lbpcascade_')) + url = [url, 'lbpcascades/']; + elseif strncmpi(f, 'hogcascade_', length('hogcascade_')) + url = [url, 'hogcascades/']; + else + error('File not found'); + end + urlwrite([url f ext], fname); + end +end + +% The facemark API provides the functionality to the user to use their own +% face detector. The code below implements a sample face detector. This +% function must be saved in its own M-function to be used by the facemark API. +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(xmlFace); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... 
+ 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/src/+cv/private/FacemarkKazemi_.cpp b/opencv_contrib/src/+cv/private/FacemarkKazemi_.cpp new file mode 100644 index 000000000..af6f95a01 --- /dev/null +++ b/opencv_contrib/src/+cv/private/FacemarkKazemi_.cpp @@ -0,0 +1,235 @@ +/** + * @file FacemarkKazemi_.cpp + * @brief mex interface for cv::face::FacemarkKazemi + * @ingroup face + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/face.hpp" +using namespace std; +using namespace cv; +using namespace cv::face; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// Object container +map > obj_; +/// name of MATLAB function to evaluate (custom face detector) +string func; + +/** Custom face detector implemented as a MATLAB function + * @param image_ input image. + * @param faces_ output faces. + * @param userData optional user-specified parameters (unused here) + * @return success flag. + */ +bool matlab_face_detector(InputArray image_, OutputArray faces_, void *userData) +{ + // create input to evaluate MATLAB function + mxArray *lhs, *rhs[2]; + rhs[0] = MxArray(func); + rhs[1] = MxArray(image_.getMat()); + + // evaluate specified function in MATLAB as: + // faces = feval("func", image) + bool success = (mexCallMATLAB(1, &lhs, 2, rhs, "feval") == 0); + if (success) { + vector faces(MxArray(lhs).toVector()); + Mat(faces).copyTo(faces_); + } + + // cleanup + mxDestroyArray(lhs); + mxDestroyArray(rhs[0]); + mxDestroyArray(rhs[1]); + + // return success flag + return success; +} + +/** Create an instance of FacemarkKazemi using options in arguments + * @param first iterator at the beginning of the vector range + * @param last iterator at the end of the vector range + * @return smart pointer to created cv::face::FacemarkKazemi + */ +Ptr createFacemarkKazemi( + vector::const_iterator first, + vector::const_iterator last) +{ + ptrdiff_t len = std::distance(first, last); + nargchk((len%2)==0); + FacemarkKazemi::Params parameters; + for (; first != last; first += 2) { + string key(first->toString()); + const MxArray& val = *(first + 1); + if (key == "CascadeDepth") + parameters.cascade_depth = static_cast(val.toInt()); + else if (key == "TreeDepth") + parameters.tree_depth = static_cast(val.toInt()); + else if (key == "NumTreesPerCascadeLevel") + parameters.num_trees_per_cascade_level = static_cast(val.toInt()); + else if (key == "LearningRate") + parameters.learning_rate = val.toFloat(); + else if (key == "OversamplingAmount") + parameters.oversampling_amount = static_cast(val.toInt()); + else if (key == "NumTestCoordinates") + parameters.num_test_coordinates = static_cast(val.toInt()); + else if (key == "Lambda") + parameters.lambda = val.toFloat(); + else if (key == "NumTestSplits") + parameters.num_test_splits = static_cast(val.toInt()); + else if (key == "ConfigFile") + parameters.configfile = val.toString(); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized option %s", key.c_str()); + } + return FacemarkKazemi::create(parameters); +} +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=3 && nlhs<=2); + + // Argument vector + vector rhs(prhs, 
prhs+nrhs); + int id = rhs[0].toInt(); + func = rhs[1].toString(); + string method(rhs[2].toString()); + + // constructor call + if (method == "new") { + nargchk(nrhs>=3 && nlhs<=1); + obj_[++last_id] = createFacemarkKazemi(rhs.begin() + 3, rhs.end()); + plhs[0] = MxArray(last_id); + mexLock(); + return; + } + + // Big operation switch + Ptr obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==3 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==3 && nlhs==0); + obj->clear(); + } + else if (method == "empty") { + nargchk(nrhs==3 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==3 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "read") { + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs==0); + string objname; + bool loadFromString = false; + for (int i=4; iread(fn); + } + else if (method == "write") { + nargchk(nrhs==4 && nlhs<=1); + FileStorage fs(rhs[3].toString(), FileStorage::WRITE + + ((nlhs > 0) ? FileStorage::MEMORY : 0)); + if (!fs.isOpened()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + fs << obj->getDefaultName() << "{"; + obj->write(fs); + fs << "}"; + if (nlhs > 0) + plhs[0] = MxArray(fs.releaseAndGetString()); + } + else if (method == "training") { + nargchk(nrhs>=7 && (nrhs%2)==1 && nlhs<=1); + string modelFilename = "face_landmarks.dat"; + for (int i=7; i images(rhs[3].toVector()); + vector images; + { + vector arr(rhs[3].toVector()); + images.reserve(arr.size()); + for (vector::const_iterator it = arr.begin(); it != arr.end(); ++it) + images.push_back(it->toMat(CV_8U)); + } + vector > landmarks(MxArrayToVectorVectorPoint(rhs[4])); + string configfile(rhs[5].toString()); + Size scale(rhs[6].toSize()); + bool b = obj->training(images, landmarks, configfile, scale, modelFilename); + plhs[0] = MxArray(b); + } + else if (method == "loadModel") { + nargchk(nrhs==4 && nlhs==0); + string filename(rhs[3].toString()); + obj->loadModel(filename); + } + else if (method == "fit") { + nargchk(nrhs==5 && nlhs<=2); + Mat image(rhs[3].toMat(CV_8U)); + vector faces(rhs[4].toVector()); + vector > landmarks; + bool b = obj->fit(image, faces, landmarks); + plhs[0] = MxArray(landmarks); + if (nlhs > 1) + plhs[1] = MxArray(b); + } + else if (method == "setFaceDetector") { + nargchk(nrhs==4 && nlhs<=1); + func = rhs[3].toString(); + bool b = obj->setFaceDetector(matlab_face_detector, NULL); + plhs[0] = MxArray(b); + } + else if (method == "getFaces") { + nargchk(nrhs==4 && nlhs<=2); + Mat image(rhs[3].toMat(CV_8U)); + vector faces; + bool b = obj->getFaces(image, faces); + plhs[0] = MxArray(faces); + if (nlhs > 1) + plhs[1] = MxArray(b); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/src/+cv/private/Facemark_.cpp b/opencv_contrib/src/+cv/private/Facemark_.cpp new file mode 100644 index 000000000..489d2a520 --- /dev/null +++ b/opencv_contrib/src/+cv/private/Facemark_.cpp @@ -0,0 +1,500 @@ +/** + * @file Facemark_.cpp + * @brief mex interface for cv::face::Facemark, cv::face::FacemarkLBF, cv::face::FacemarkAAM + * @ingroup face + * @author Amro + * @date 2018 + */ +#include "mexopencv.hpp" +#include "opencv2/face.hpp" +using namespace std; +using namespace cv; +using namespace cv::face; + +// Persistent objects +namespace { +/// Last object id to allocate +int last_id = 0; +/// 
Object container +map > obj_; +/// name of MATLAB function to evaluate (custom face detector) +string func; + +/** Custom face detector implemented as a MATLAB function + * @param image_ input image. + * @param faces_ output faces. + * @param userData optional user-specified parameters (unused here) + * @return success flag. + */ +bool matlab_face_detector(InputArray image_, OutputArray faces_, void *userData) +{ + // create input to evaluate MATLAB function + mxArray *lhs, *rhs[2]; + rhs[0] = MxArray(func); + rhs[1] = MxArray(image_.getMat()); + + // evaluate specified function in MATLAB as: + // faces = feval("func", image) + bool success = (mexCallMATLAB(1, &lhs, 2, rhs, "feval") == 0); + if (success) { + vector faces(MxArray(lhs).toVector()); + Mat(faces).copyTo(faces_); + } + + // cleanup + mxDestroyArray(lhs); + mxDestroyArray(rhs[0]); + mxDestroyArray(rhs[1]); + + // return success flag + return success; +} + +/** Convert an MxArray to cv::face::FacemarkAAM::Config + * @param arr struct-array MxArray object + * @param idx linear index of the struct array element + * @return config object + */ +FacemarkAAM::Config MxArrayToConfig(const MxArray& arr, mwIndex idx = 0) +{ + return FacemarkAAM::Config( + arr.isField("R") ? arr.at("R", idx).toMat(CV_32F) : Mat::eye(2,2,CV_32F), + arr.isField("t") ? arr.at("t", idx).toPoint2f() : Point2f(0,0), + arr.isField("scale") ? arr.at("scale", idx).toFloat() : 1.0f, + arr.isField("scaleIdx") ? arr.at("scaleIdx", idx).toInt() : 0 + ); +} + +/** Convert an MxArray to std::vector + * @param arr struct-array MxArray object + * @return vector of config objects + */ +vector MxArrayToVectorConfig(const MxArray& arr) +{ + const mwSize n = arr.numel(); + vector configs; + configs.reserve(n); + if (arr.isCell()) + for (mwIndex i = 0; i < n; ++i) + configs.push_back(MxArrayToConfig(arr.at(i))); + else if (arr.isStruct()) + for (mwIndex i = 0; i < n; ++i) + configs.push_back(MxArrayToConfig(arr,i)); + else + mexErrMsgIdAndTxt("mexopencv:error", + "MxArray unable to convert to std::vector"); + return configs; +} + +/** Create an instance of FacemarkLBF using options in arguments + * @param first iterator at the beginning of the vector range + * @param last iterator at the end of the vector range + * @return smart pointer to created cv::face::FacemarkLBF + */ +Ptr createFacemarkLBF( + vector::const_iterator first, + vector::const_iterator last) +{ + ptrdiff_t len = std::distance(first, last); + nargchk((len%2)==0); + FacemarkLBF::Params parameters; + for (; first != last; first += 2) { + string key(first->toString()); + const MxArray& val = *(first + 1); + if (key == "ShapeOffset") + parameters.shape_offset = val.toDouble(); + else if (key == "CascadeFace") + parameters.cascade_face = val.toString(); + else if (key == "Verbose") + parameters.verbose = val.toBool(); + else if (key == "NLandmarks") + parameters.n_landmarks = val.toInt(); + else if (key == "InitShapeN") + parameters.initShape_n = val.toInt(); + else if (key == "StagesN") + parameters.stages_n = val.toInt(); + else if (key == "TreeN") + parameters.tree_n = val.toInt(); + else if (key == "TreeDepth") + parameters.tree_depth = val.toInt(); + else if (key == "BaggingOverlap") + parameters.bagging_overlap = val.toDouble(); + else if (key == "ModelFilename") + parameters.model_filename = val.toString(); + else if (key == "SaveModel") + parameters.save_model = val.toBool(); + else if (key == "Seed") + parameters.seed = static_cast(val.toInt()); + else if (key == "FeatsM") + parameters.feats_m = 
val.toVector(); + else if (key == "RadiusM") + parameters.radius_m = val.toVector(); + else if (key == "Pupils") { + if (!val.isCell() || val.numel() != 2) + mexErrMsgIdAndTxt("mexopencv:error", "Invalid arguments"); + vector arr(val.toVector()); + parameters.pupils[0] = arr[0].toVector(); + parameters.pupils[1] = arr[1].toVector(); + } + else if (key == "DetectROI") + parameters.detectROI = val.toRect(); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized option %s", key.c_str()); + } + return FacemarkLBF::create(parameters); +} + +/** Create an instance of FacemarkAAM using options in arguments + * @param first iterator at the beginning of the vector range + * @param last iterator at the end of the vector range + * @return smart pointer to created cv::face::FacemarkAAM + */ +Ptr createFacemarkAAM( + vector::const_iterator first, + vector::const_iterator last) +{ + ptrdiff_t len = std::distance(first, last); + nargchk((len%2)==0); + FacemarkAAM::Params parameters; + for (; first != last; first += 2) { + string key(first->toString()); + const MxArray& val = *(first + 1); + if (key == "ModelFilename") + parameters.model_filename = val.toString(); + else if (key == "M") + parameters.m = val.toInt(); + else if (key == "N") + parameters.n = val.toInt(); + else if (key == "NIter") + parameters.n_iter = val.toInt(); + else if (key == "Verbose") + parameters.verbose = val.toBool(); + else if (key == "SaveModel") + parameters.save_model = val.toBool(); + else if (key == "MaxM") + parameters.max_m = val.toInt(); + else if (key == "MaxN") + parameters.max_n = val.toInt(); + else if (key == "TextureMaxM") + parameters.texture_max_m = val.toInt(); + else if (key == "Scales") + parameters.scales = val.toVector(); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized option %s", key.c_str()); + } + return FacemarkAAM::create(parameters); +} + +/** Create an instance of Facemark using options in arguments + * @param type facemark algorithm, one of: + * - "LBF" + * - "AAM" + * @param first iterator at the beginning of the vector range + * @param last iterator at the end of the vector range + * @return smart pointer to created cv::face::Facemark + */ +Ptr createFacemark( + const string& type, + vector::const_iterator first, + vector::const_iterator last) +{ + Ptr p; + if (type == "LBF") + p = createFacemarkLBF(first, last); + else if (type == "AAM") + p = createFacemarkAAM(first, last); + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized facemark %s", type.c_str()); + if (p.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to create Facemark"); + return p; +} +} + +/** + * Main entry called from Matlab + * @param nlhs number of left-hand-side arguments + * @param plhs pointers to mxArrays in the left-hand-side + * @param nrhs number of right-hand-side arguments + * @param prhs pointers to mxArrays in the right-hand-side + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Check the number of arguments + nargchk(nrhs>=3 && nlhs<=3); + + // Argument vector + vector rhs(prhs, prhs+nrhs); + int id = rhs[0].toInt(); + func = rhs[1].toString(); + string method(rhs[2].toString()); + + // constructor call + if (method == "new") { + nargchk(nrhs>=4 && nlhs<=1); + obj_[++last_id] = createFacemark( + rhs[3].toString(), rhs.begin() + 4, rhs.end()); + plhs[0] = MxArray(last_id); + mexLock(); + return; + } + // static method call + else if (method == "getFacesHAAR") { + nargchk(nrhs==5 && nlhs<=2); + Mat image(rhs[3].toMat(CV_8U)); + string 
face_cascade_name(rhs[4].toString()); + vector faces; + bool b = cv::face::getFacesHAAR(image, faces, face_cascade_name); + plhs[0] = MxArray(faces); + if (nlhs > 1) + plhs[1] = MxArray(b); + return; + } + else if (method == "loadDatasetList") { + nargchk(nrhs==5 && nlhs<=3); + string imageList(rhs[3].toString()); + string annotationList(rhs[4].toString()); + vector images, annotations; + bool b = cv::face::loadDatasetList( + imageList, annotationList, images, annotations); + plhs[0] = MxArray(images); + if (nlhs > 1) + plhs[1] = MxArray(annotations); + if (nlhs > 2) + plhs[2] = MxArray(b); + return; + } + else if (method == "loadTrainingData1") { + nargchk(nrhs>=5 && (nrhs%2)==1 && nlhs<=3); + float offset = 0.0f; + for (int i=5; i images; + vector > facePoints; + bool b = cv::face::loadTrainingData( + imageList, groundTruth, images, facePoints, offset); + plhs[0] = MxArray(images); + if (nlhs > 1) + plhs[1] = MxArray(facePoints); + if (nlhs > 2) + plhs[2] = MxArray(b); + return; + } + else if (method == "loadTrainingData2") { + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=3); + char delim = ' '; + float offset = 0.0f; + for (int i=4; i images; + vector > facePoints; + bool b = cv::face::loadTrainingData( + filename, images, facePoints, delim, offset); + plhs[0] = MxArray(images); + if (nlhs > 1) + plhs[1] = MxArray(facePoints); + if (nlhs > 2) + plhs[2] = MxArray(b); + return; + } + else if (method == "loadTrainingData3") { + nargchk(nrhs==4 && nlhs<=3); + vector filenames(rhs[3].toVector()); + vector > trainlandmarks; + vector trainimages; + bool b = cv::face::loadTrainingData( + vector(filenames.begin(), filenames.end()), + trainlandmarks, trainimages); + plhs[0] = MxArray(trainlandmarks); + if (nlhs > 1) + plhs[1] = MxArray(trainimages); + if (nlhs > 2) + plhs[2] = MxArray(b); + return; + } + else if (method == "loadFacePoints") { + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs<=2); + float offset = 0.0f; + for (int i=4; i points; + bool b = cv::face::loadFacePoints(filename, points, offset); + plhs[0] = MxArray(points); + if (nlhs > 1) + plhs[1] = MxArray(b); + return; + } + else if (method == "drawFacemarks") { + nargchk(nrhs>=5 && (nrhs%2)==1 && nlhs<=1); + Scalar color(255,0,0); + for (int i=5; i points(rhs[4].toVector()); + cv::face::drawFacemarks(image, points, color); + plhs[0] = MxArray(image); + return; + } + + // Big operation switch + Ptr obj = obj_[id]; + if (obj.empty()) + mexErrMsgIdAndTxt("mexopencv:error", "Object not found id=%d", id); + if (method == "delete") { + nargchk(nrhs==3 && nlhs==0); + obj_.erase(id); + mexUnlock(); + } + else if (method == "clear") { + nargchk(nrhs==3 && nlhs==0); + obj->clear(); + } + else if (method == "empty") { + nargchk(nrhs==3 && nlhs<=1); + plhs[0] = MxArray(obj->empty()); + } + else if (method == "getDefaultName") { + nargchk(nrhs==3 && nlhs<=1); + plhs[0] = MxArray(obj->getDefaultName()); + } + else if (method == "read") { + nargchk(nrhs>=4 && (nrhs%2)==0 && nlhs==0); + string objname; + bool loadFromString = false; + for (int i=4; iread(fn); + } + else if (method == "write") { + nargchk(nrhs==4 && nlhs<=1); + FileStorage fs(rhs[3].toString(), FileStorage::WRITE + + ((nlhs > 0) ? 
FileStorage::MEMORY : 0)); + if (!fs.isOpened()) + mexErrMsgIdAndTxt("mexopencv:error", "Failed to open file"); + fs << obj->getDefaultName() << "{"; + obj->write(fs); + fs << "}"; + if (nlhs > 0) + plhs[0] = MxArray(fs.releaseAndGetString()); + } + else if (method == "addTrainingSample") { + nargchk(nrhs==5 && nlhs<=1); + Mat image(rhs[3].toMat(CV_8U)); + vector landmarks(rhs[4].toVector()); + bool b = obj->addTrainingSample(image, landmarks); + plhs[0] = MxArray(b); + } + else if (method == "training") { + nargchk(nrhs==3 && nlhs==0); + obj->training(NULL); //NOTE: null for unused input + } + else if (method == "loadModel") { + nargchk(nrhs==4 && nlhs==0); + string model(rhs[3].toString()); + obj->loadModel(model); + } + else if (method == "fit") { + nargchk(nrhs>=5 && (nrhs%2)==1 && nlhs<=2); + vector configs; + for (int i=5; i faces(rhs[4].toVector()); + vector > landmarks; + bool b = obj->fit(image, faces, landmarks, + !configs.empty() ? &configs : NULL); + plhs[0] = MxArray(landmarks); + if (nlhs > 1) + plhs[1] = MxArray(b); + } + else if (method == "setFaceDetector") { + nargchk(nrhs==4 && nlhs<=1); + func = rhs[3].toString(); + bool b = obj->setFaceDetector(matlab_face_detector, NULL); + plhs[0] = MxArray(b); + } + else if (method == "getFaces") { + nargchk(nrhs==4 && nlhs<=2); + Mat image(rhs[3].toMat(CV_8U)); + vector faces; + bool b = obj->getFaces(image, faces); + plhs[0] = MxArray(faces); + if (nlhs > 1) + plhs[1] = MxArray(b); + } + else if (method == "getData") { + nargchk(nrhs==3 && nlhs<=2); + bool b; + Ptr p = obj.dynamicCast(); + if (!p.empty()) { + // AAM + FacemarkAAM::Data items; + b = obj->getData(&items); + plhs[0] = MxArray(items.s0); + } + else { + // LBF + b = obj->getData(NULL); //NOTE: null for unused output + plhs[0] = MxArray(Mat()); + } + if (nlhs > 1) + plhs[1] = MxArray(b); + } + else + mexErrMsgIdAndTxt("mexopencv:error", + "Unrecognized operation %s", method.c_str()); +} diff --git a/opencv_contrib/test/unit_tests/TestFacemark.m b/opencv_contrib/test/unit_tests/TestFacemark.m new file mode 100644 index 000000000..e0775aa0e --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestFacemark.m @@ -0,0 +1,203 @@ +classdef TestFacemark + %TestFacemark + + properties (Constant) + func = 'myFaceDetector'; + root = fullfile(mexopencv.root(),'test','facemark'); + fCascade = fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml'); + end + + methods (Static) + function test_create + obj = cv.Facemark('LBF'); + assert(isobject(obj)); + + obj = cv.Facemark('AAM'); + assert(isobject(obj)); + end + + function test_haar_detect + img = cv.imread(fullfile(TestFacemark.root,'david1.jpg')); + [faces, b] = cv.Facemark.getFacesHAAR(img, TestFacemark.fCascade); + validateattributes(faces, {'cell'}, {'vector'}); + cellfun(@(f) validateattributes(f, {'numeric'}, ... + {'vector', 'numel',4}), faces); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_default_detector + img = cv.imread(fullfile(TestFacemark.root,'david1.jpg')); + obj = cv.Facemark('LBF', 'CascadeFace',TestFacemark.fCascade); + [faces, b] = obj.getFaces(img); + validateattributes(faces, {'cell'}, {'vector'}); + cellfun(@(f) validateattributes(f, {'numeric'}, ... 
+ {'vector', 'numel',4}), faces); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_custom_detector + % skip test if external M-file is not found on the path + if ~exist([TestFacemark.func '.m'], 'file') + error('mexopencv:testskip', 'undefined function'); + end + + if true + obj = cv.Facemark('LBF'); + else + obj = cv.Facemark('AAM'); + end + + b = obj.setFaceDetector(TestFacemark.func); + validateattributes(b, {'logical'}, {'scalar'}); + + img = cv.imread(fullfile(TestFacemark.root,'david1.jpg')); + [faces, b] = obj.getFaces(img); + validateattributes(faces, {'cell'}, {'vector'}); + cellfun(@(f) validateattributes(f, {'numeric'}, ... + {'vector', 'numel',4}), faces); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_train + if true + % training is time consuming + error('mexopencv:testskip', 'slow'); + end + + modelFilename = fullfile(tempdir(), 'model.yaml'); + opts = {'Verbose',false, ... + 'ModelFilename',modelFilename, 'SaveModel',true}; + if true + obj = cv.Facemark('LBF', ... + 'CascadeFace',TestFacemark.fCascade, opts{:}); + else + obj = cv.Facemark('AAM', 'N',1, 'M',1, opts{:}); + end + + fImgs = cv.glob(fullfile(TestFacemark.root,'david*.jpg')); + fPts = cv.glob(fullfile(TestFacemark.root,'david*.pts')); + for i=1:numel(fImgs) + [pts, b] = cv.Facemark.loadFacePoints(fPts{i}); + validateattributes(pts, {'cell'}, {'vector'}); + cellfun(@(p) validateattributes(p, {'numeric'}, ... + {'vector', 'numel',2}), pts); + validateattributes(b, {'logical'}, {'scalar'}); + + img = cv.imread(fImgs{i}); + b = obj.addTrainingSample(img, pts); + validateattributes(b, {'logical'}, {'scalar'}); + end + obj.training(); + assert(exist(modelFilename, 'file') == 2, 'no trained model saved'); + delete(modelFilename); + end + + function test_detect + obj = cv.Facemark('LBF', 'CascadeFace',TestFacemark.fCascade); + obj.loadModel(get_model_file()); + + img = cv.imread(fullfile(TestFacemark.root,'david1.jpg')); + + [faces, b] = obj.getFaces(img); + validateattributes(faces, {'cell'}, {'vector'}); + cellfun(@(f) validateattributes(f, {'numeric'}, ... + {'vector', 'numel',4}), faces); + validateattributes(b, {'logical'}, {'scalar'}); + + [pts, b] = obj.fit(img, faces); + validateattributes(pts, {'cell'}, {'vector', 'numel',numel(faces)}); + cellfun(@(c) validateattributes(c, {'cell'}, {'vector'}), pts); + cellfun(@(c) cellfun(@(p) validateattributes(p, ... + {'numeric'}, {'vector', 'numel',2}), c), pts); + validateattributes(b, {'logical'}, {'scalar'}); + + if ~isempty(pts) + out = cv.Facemark.drawFacemarks(img, pts{1}, 'Color',[255 0 0]); + validateattributes(out, {class(img)}, {'size',size(img)}); + end + end + + function test_load_dataset_list + [imgFiles, ptsFiles, b] = cv.Facemark.loadDatasetList(... + fullfile(TestFacemark.root,'images.txt'), ... + fullfile(TestFacemark.root,'annotations.txt')); + validateattributes(imgFiles, {'cell'}, {'vector'}); + assert(iscellstr(imgFiles)); + validateattributes(ptsFiles, {'cell'}, ... + {'vector', 'numel',numel(imgFiles)}); + assert(iscellstr(ptsFiles)); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_load_data_1 + [imgFiles, pts, b] = cv.Facemark.loadTrainingData1(... + fullfile(TestFacemark.root,'images.txt'), ... + fullfile(TestFacemark.root,'annotations.txt')); + validateattributes(imgFiles, {'cell'}, {'vector'}); + assert(iscellstr(imgFiles)); + validateattributes(pts, {'cell'}, ... 
+ {'vector', 'numel',numel(imgFiles)}); + cellfun(@(c) validateattributes(c, {'cell'}, {'vector'}), pts); + cellfun(@(c) cellfun(@(p) validateattributes(p, ... + {'numeric'}, {'vector', 'numel',2}), c), pts); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_load_data_2 + [imgFiles, pts, b] = cv.Facemark.loadTrainingData2(... + fullfile(TestFacemark.root,'points.txt')); + validateattributes(imgFiles, {'cell'}, {'vector'}); + assert(iscellstr(imgFiles)); + validateattributes(pts, {'cell'}, ... + {'vector', 'numel',numel(imgFiles)}); + cellfun(@(c) validateattributes(c, {'cell'}, {'vector'}), pts); + cellfun(@(c) cellfun(@(p) validateattributes(p, ... + {'numeric'}, {'vector', 'numel',2}), c), pts); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_load_data_3 + fPoints = cv.glob(fullfile(TestFacemark.root,'david*.txt')); + [imgFiles, pts, b] = cv.Facemark.loadTrainingData3(fPoints); + validateattributes(imgFiles, {'cell'}, ... + {'vector', 'numel',numel(fPoints)}); + assert(iscellstr(imgFiles)); + validateattributes(pts, {'cell'}, ... + {'vector', 'numel',numel(imgFiles)}); + cellfun(@(c) validateattributes(c, {'cell'}, {'vector'}), pts); + cellfun(@(c) cellfun(@(p) validateattributes(p, ... + {'numeric'}, {'vector', 'numel',2}), c), pts); + validateattributes(b, {'logical'}, {'scalar'}); + end + end + +end + +function modelFile = get_model_file() + modelFile = fullfile(mexopencv.root(),'test','lbfmodel.yaml'); + if exist(modelFile, 'file') ~= 2 + % download model from GitHub (~ 54MB) + url = 'https://github.com/kurnianggoro/GSOC2017/raw/master/data/lbfmodel.yaml'; + urlwrite(url, modelFile); + end +end + +% TODO: this function needs to be on the path as a top-level function +% saved in its own M-file. + +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(TestFacemark.fCascade); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.4, 'MinNeighbors',2, ... + 'ScaleImage',true, 'MinSize',[30 30]); +end diff --git a/opencv_contrib/test/unit_tests/TestFacemarkKazemi.m b/opencv_contrib/test/unit_tests/TestFacemarkKazemi.m new file mode 100644 index 000000000..81ace1210 --- /dev/null +++ b/opencv_contrib/test/unit_tests/TestFacemarkKazemi.m @@ -0,0 +1,118 @@ +classdef TestFacemarkKazemi + %TestFacemarkKazemi + + properties (Constant) + func = 'myFaceDetector'; + root = fullfile(mexopencv.root(),'test','facemark'); + end + + methods (Static) + function test_create + obj = cv.FacemarkKazemi(); + assert(isobject(obj)); + end + + function test_custom_detector + % skip test if external M-file is not found on the path + if ~exist([TestFacemarkKazemi.func '.m'], 'file') + error('mexopencv:testskip', 'undefined function'); + end + + obj = cv.FacemarkKazemi(); + b = obj.setFaceDetector(TestFacemarkKazemi.func); + validateattributes(b, {'logical'}, {'scalar'}); + + img = cv.imread(fullfile(TestFacemarkKazemi.root,'david1.jpg')); + [faces, b] = obj.getFaces(img); + validateattributes(faces, {'cell'}, {'vector'}); + cellfun(@(f) validateattributes(f, {'numeric'}, ... 
+ {'vector', 'numel',4}), faces); + validateattributes(b, {'logical'}, {'scalar'}); + end + + function test_train + if true + % training is time consuming + error('mexopencv:testskip', 'slow'); + end + + % skip test if external M-file is not found on the path + if ~exist([TestFacemark.func '.m'], 'file') + error('mexopencv:testskip', 'undefined function'); + end + + modelFilename = fullfile(tempdir(),'model.dat'); + fConfig = fullfile(TestFacemarkKazemi.root,'config.xml'); + fImgs = cv.glob(fullfile(TestFacemarkKazemi.root,'david*.jpg')); + fPts = cv.glob(fullfile(TestFacemarkKazemi.root,'david*.pts')); + + obj = cv.FacemarkKazemi('ConfigFile',fConfig); + obj.setFaceDetector(TestFacemarkKazemi.func); + + imgs = cellfun(@cv.imread, fImgs, 'UniformOutput',false); + pts = cellfun(@cv.Facemark.loadFacePoints, fPts, 'UniformOutput',false); + + b = obj.training(imgs, pts, fConfig, [460 460], ... + 'ModelFilename',modelFilename); + validateattributes(b, {'logical'}, {'scalar'}); + assert(exist(modelFilename, 'file') == 2, 'no trained model saved'); + delete(modelFilename); + end + + function test_detect + % skip test if external M-file is not found on the path + if ~exist([TestFacemark.func '.m'], 'file') + error('mexopencv:testskip', 'undefined function'); + end + + obj = cv.FacemarkKazemi(); + obj.setFaceDetector(TestFacemarkKazemi.func); + obj.loadModel(get_model_file()); + + img = cv.imread(fullfile(TestFacemarkKazemi.root,'david1.jpg')); + + [faces, b] = obj.getFaces(img); + validateattributes(faces, {'cell'}, {'vector'}); + cellfun(@(f) validateattributes(f, {'numeric'}, ... + {'vector', 'numel',4}), faces); + validateattributes(b, {'logical'}, {'scalar'}); + + [pts, b] = obj.fit(img, faces); + validateattributes(pts, {'cell'}, {'vector', 'numel',numel(faces)}); + cellfun(@(c) validateattributes(c, {'cell'}, {'vector'}), pts); + cellfun(@(c) cellfun(@(p) validateattributes(p, ... + {'numeric'}, {'vector', 'numel',2}), c), pts); + validateattributes(b, {'logical'}, {'scalar'}); + end + end + +end + +function modelFile = get_model_file() + modelFile = fullfile(mexopencv.root(),'test','face_landmark_model.dat'); + if exist(modelFile, 'file') ~= 2 + % download model from GitHub (~ 69MB) + url = 'https://cdn.rawgit.com/opencv/opencv_3rdparty/contrib_face_alignment_20170818/face_landmark_model.dat'; + urlwrite(url, modelFile); + end +end + +% TODO: this function needs to be on the path as a top-level function +% saved in its own M-file. + +function faces = myFaceDetector(img) + persistent obj + if isempty(obj) + obj = cv.CascadeClassifier(); + obj.load(fullfile(mexopencv.root(),'test','lbpcascade_frontalface.xml')); + end + + if size(img,3) > 1 + gray = cv.cvtColor(img, 'RGB2GRAY'); + else + gray = img; + end + gray = cv.equalizeHist(gray); + faces = obj.detect(gray, 'ScaleFactor',1.1, 'MinNeighbors',3, ... 
+ 'ScaleImage',false, 'MinSize',[30 30]); +end diff --git a/test/facemark/annotations.txt b/test/facemark/annotations.txt new file mode 100644 index 000000000..2bca560cd --- /dev/null +++ b/test/facemark/annotations.txt @@ -0,0 +1,2 @@ +C:\Users\Amro\Desktop\mexopencv\test\facemark\david1.pts +C:\Users\Amro\Desktop\mexopencv\test\facemark\david2.pts diff --git a/test/facemark/config.xml b/test/facemark/config.xml new file mode 100644 index 000000000..969df63ef --- /dev/null +++ b/test/facemark/config.xml @@ -0,0 +1,11 @@ + + +10 +4 +500 +1.0000000149011612e-01 +20 +400 +1.0000000149011612e-01 +20 + diff --git a/test/facemark/david1.jpg b/test/facemark/david1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..579cbd8490c0c09158f93a31c303bfe2531071ca GIT binary patch literal 19981 zcmbTdWmFtp&;~eoa0_lpfZ!0^Ed+NRoZ#;6fgr&xK=2S;C%8j!XYj#okl+JB1{h#@ zzwhkspZ&4Bx6i$&`}FNT_4KV%b?d3>=cVT@0D+R6q8tDT2>?KPIRMYA02u%>(tqv0 z5&6G{it^uxhKh=UijIbk{y!Z9^A$P}gn7b3d;|Y89`eRCAT`wj*uNI8N!ea)Dl!0%X@!~&d|AXxR9kB5K zU&#J1VE;F+6#xzj(#ztZ5CS9th?0hOEQ72E_d#bv2bbTG|MI$iKtw9`2w+f<*kZ11 zLk2aq6(n(gBgL1DRj|gJ;x|>&c!`7IQO7s-JfzX9TAP)9FQkTiAynOQ6DgXXbUEE< zo@1mlXP5?gA|`k`0@5hnZapMo{Pq$JmOCTyqn=LiSidQOzz2vzcG%RG08Ef@qi8Xe z1E`p>4Av0^Zq&!_=GD&pQd7I&?G(On-I0&p)-3hw*u4tWt)X$e34=$1mf0zbS9J1? z!+M~j_3o?Zz&vTOnJo5FIzH9J#?tNY+es9asSBdW13iJy02HM-Z{Ad2xcBkwnGG+GFxn0M zAX^-3Nc^Eu+^(wwDXa2#K(yd8xIpL%zT$;sKNwflwzM`?Yp+DD9y9cQs7=}NkS0C; zD@sd^3*7XM%B01HQSJO;|F=>T^O*h7!wJ7?fmCVb6xJ8=gIq#wiBd}bgkNv)av^V> zE14g1TJQhqWoo4-q7LAHbj+I1&1xv^py9eH|JXtGbC*~vLDkB51TK8SmiaVIw}DFA zMq(3cWdqx{+js-*uFoCXv?BR$du0!0^ zyW%LSy-26MFgwIFCMW;H^IpfaXy$|cXn8}?G$-fD8+&Tpxi(wzNxR{4G^%`=_$S?| z(n2E60gmC1%t_ifN*x4U2})%S%|_Y$V~v6b^L^WTYTZ$llY!~>h9O^>vPYb?*)={4 zd-T=wC)hZ)8TO6slF%QRBdP=rr^@)Qsf|0*jzo&v*LxEf9>-|iHyZi=N-5l2ilC6zf!(O@E+#BB<+DB=*efLJtI1_z8@IQ!M{j z;7c1vM_dWl($FW;-v;J8u7%(r*xMFCTWn5uQ{<6Q{v-3hqNQMb5@21=mO6%FIo~@r z?To*GW7Fj=*RadYf%ug`qVZ(Fl;dm&ddP$a`P+_;My#JW31yOz!Nl7I5i4qGPBFN( zDGJpp4~1a2ByKo2L(gc7KqN)|Ei|zSO22b0Allo`>Qdpz7saf|L|9>qzg@1V z{Np~v0l1Wo5i(T$9#lu@q z z{;VuNdB;=;*=5{dhWJe>Tju(b6gpN6dz6p6Hyy)YcdFpRs+ zC{0nUT05mZEwr$8i$O5tHx59qY&#t>`Ss~jfDC-$odrXZwCqg^XDDM;E{Pvsjr7Mu za^Ype0lFtVrhivFi&y>AtsjPIrq~qBZN*I=$9#+ml#e7`I=6ubYoGK_ zT0ql7GxcWcw+lRYfr~>0PE;s1@+Gn~-;+O8G=}qzR<#Ex=*Eb-XYW>!^B1+|HCsOf z`Z9ba9}?QHUS00^K*j1P^e%&&o=Z*Sx<*`JOxR|%VUGqcK+m~P;zZMRV<}o~&gVJ>ne8&?-WpE4EFlqVuFtJe`4mJ>&&-+bxy znN-X}nC38use})-KSZ%gjT5jqR9A97gP2#@x<+q&qB(r02-Tl%AO(aSn{JR2HV;Q8 zq$S8D{@cdrKd!O3Cy7yNDZ*>I-8?CL5E*KQIr!=YN@@6g34F92)sf;5I^GB@JQ z8og8Cg$dgiov#M>OY%+0tpE0YKe8F>zR!#2ZdLMrJC#w6SiI2fUbi^m*^0>#e}IVk zB5AhCYtE4@_%(~SKf*LZp@$C-*?p}lR%d?({TGfr^)jNJU#BxyQRi2aGWtZnS}l%`EAk@@gs0k6@G#-;gt^{_2D-7qn8qhZ+w;UOdY=^!)ASa% zsWSqXZheuCzbpTcL6@iPaxik7x3#waa}b|>kjs!CzsRJGb4lE^KjL~**=V>>n(ley z_yC_4o~+=yowSzqB_JnLdxUcC2C8|y4;TE-qKk1qb z(=$o@D2^Toh;?v@6Y;lp@0qAA`y;^*^G^Jcd?d2M8}=}=pAx-yd0~Q<*Z~_l^JK^* zr5L7l+8n?2D!yZM9y1&|pdxLYPG9w&1Mz#{`seau*6Vuqd$9ddCHrI8^-!&)&nwBD zBp`tylhpC(IR3r5f;Pb355u0DQ3(hATZ{|elP7kpn z6_G{`vptUMqfCy;hadPyjgBTRQR9-d5A}#mWuF12xhV5`YPldUhZ4+dkx?;KYJ!tg zM#6%ww?cnF^zbyOT@%&e8z}}EHqtj%EYN~}k}@bLMxdo#IDEnK6OP8WXTV;?u(qkW zce%Pt4RX-zSODHPwjyjLU2mHy<2rm%_2zamA{D$orOs61-w<`W7{uFhdw2T}$DWTR znJy!8uM2AZYV};Pa8-ZBQbp=|j~>7)f51u>zt%AOC)di&CS+f&3+Wi1nX7s3H#G{L zr|HJ-ej=Ov^tlo+B5}INql)hk2q>iw7GMRv;Xk(D+bZk#qQk4ln|zszwLY(?Sn_# zY2SJeho1qJ-LCa^sji#{XTQ(b%HW>0c8eQqu>(qC4G11_>%@}Nic@z@53PEMzb4o^ z3tm62AH?5lyNoBhE8BjykfBZ?`0zn=k?YG74_pr)voxV}%?vE|=^5~AJn+YG&q&W= zM;@DMoJO6h-p$I6V2Yqq3?|A-x0;&Qk)g7sLFul}+^Jik)CflIZA~1uLVr|(x>`;p 
z^h=2x6DE|nQT!t(kJhjPWMGcHE35dg5{(1G*DDs&E{;Rv)H$rK)3KA6Jd-%8D8L*8 zMQF7~p+yu^0HTU00Hd!0ihaB?Pcp?4Zenn&>Q8DfZ9yj4`WycM1a0_fABB7o;%|-@ z*32f6Czu~~??_Z;z3oa}o5Js~J7C6HO4b0Zg9_ylQ2E3F~QGvfgEA4>cKi1C#(E+Lyz(k{o{s?>(T(^^zA6lQ_<%((F2=tp}d`mEm(ueAs@H9kxBXH1ehd*Z$GM_v9Ox{ZnX%Mv|}dH2Iz8|?le z)TOvkPum|DJx23X?K<&`=ns{*a_5M&e-zuu9xtCGaPDza=r*1q)Cjk`(`3Ugcllkb zzOeA~_=;O=NMp>K)mUe@dh~x2{3rhagj2%fShk3tD1!O^9u0I)4Nh)N#>XVFN}R0O z<=QudZLK^-rrITzS)gp?aB}z+^^Q zH0T;Aq>>LH$_nyE?_TxSe$fpV!uL-cXowQ}bqfoi=-W^E{Apy^XnQ)6heOQ8RCVie z%VTTfw~Tc?UgB>H>4i);SJK54knYR;$G@leRQ?Y55#h}U}@e`ggBm@Ll$) zs%Zj$I7sN&ah`^>$a48d4%D@5!mc86T*=zrXUv{3{fm5JHTf6A1sdIt?)=h5J$bKe z{hL2#n=gZ&X1%H28TAW@z#t!$2XS7XZn}hIyowlmcBhG_Wb-6aHiMPLdw5*Bo*KWk znr_=G6!CQ`!FU(C#;hE!>-DHS^pGxI?WvFUvr?ha|v-zwLWP8@X zfvzqeOdI0EZe=)A+lu7Mb1X%EA$4T5PqnN>r56Vjbq#YIL*_U=_@*0}o%6e|TB9`6 z4m^@axT6kJcHW}C^1g2_eG|8#%gNkbRz!_kCyaFi+Pu9rh19+)$*0GXV!Mtv`SYET zterpYqYCe}i`ngFzf1BGYiTgf*~1>g(zmzW)84{9_M`d zP)9v0lf#_N(8o@U-^AXBR&he}O?E$rZ;M_E);=5jP4NfA?-5$O7y2f#b7OZCjBS!B zSqS|roUhOy_&KBYYk%3t{tDso5$xsK=g1ulhpF54f+->iyLx zZBL#-MHEvRp&EfIar{EM?~D{hU#DO2Ll1<_qxdVt{{R_pZ1zj4I_dVaH}J(7CPVb-IPNRU&GQfJ{0yqCS<{85 zDwS1_y{{tsefYYDB?acmRT#{9&Oa;-ngsURDwy- zq~ji)&3+3?QmI|klGUGWNvP80lNaK>jqIKSc&zrAUGpd%7ai-N_%VK(pTdh^(T&#P zSXd6A_pWo|FNv+8(IN2r9glMnN~Ma6A0QlS$T}e32C!K|79l zt^WXo7UJ(&)Sfsu547b0>)yI(Cs!9_^f~Lz!aT>IcrQhs&r-Fzkw`BfAZOmbi_|ZKjP0WSgNr{F?WzKT@<$2|P>PF^qbO_vDl*Ngf_1 ztg6W~N?e{-g;>6Z# zE|b6X&8mMG>%V86N-q<@W_bycJNuQnQ_puF%L2L`16Xed_|p4Dl5Mf$dIsouuOj`L zJaYaY_?xj<>muV^ zMb#Fy>uWba=y z{{U)!mFv3I+Syo5JEJnBMUK(!Uq61(zAt|bd;r#(IGH87j@r+0>>a{??1H`vIIGQ= z^Tlj+LMqlf57^^II%mUHyEAWVnM9c#fyu=)_Lx~TzlGYp)!o+SxdB*bjB|nd)}QS5 z_TC!!MQFn)Xuv zi^j3V2Zm;7WKcj1Gux(roqYlS00jQ{rzPH${vEuy#>uuW(Z@YM4|@57#C|BWy3q+) zNkSV3^RBb@BKVdxUmMTi`(frj@Sq>|TN(VZU!P?7$Aiw4{LgC}2P!vb$G`A;uk7pg z_&yMR)%F&kc@SRsYf+IS$;R)rL6g{J=C8mC{<;4E;Njod%TWESzu=SJHnh_0Jk3kO y>lVMK7JO~;3G+V^gpbC*k(oeQSr?$_Yx-v|#JpA-GksC=S6uBOD58q)DF4~NpOPj3 literal 0 HcmV?d00001 diff --git a/test/facemark/david2.pts b/test/facemark/david2.pts new file mode 100644 index 000000000..2bc6417ec --- /dev/null +++ b/test/facemark/david2.pts @@ -0,0 +1,72 @@ +version: 1 +n_points: 68 +{ +138.01855 89.154579 +139.21136 99.620071 +141.09622 109.68665 +143.36954 119.3373 +147.22928 127.61332 +152.57074 135.18625 +158.65134 141.56909 +165.28262 146.72121 +174.01408 148.01602 +183.21277 146.32979 +190.60913 140.83598 +197.43958 133.6351 +202.74667 124.8311 +206.16403 115.28817 +207.67905 104.79993 +208.76697 94.739845 +209.33478 84.082756 +140.5466 79.584969 +143.79265 74.090782 +149.96266 72.06385 +156.88336 72.542412 +163.54131 74.896339 +173.25909 74.705627 +180.40253 72.099586 +188.18176 71.668297 +195.93837 73.45594 +201.47372 78.314529 +169.05841 82.679474 +168.88893 90.1269 +168.66153 97.418312 +168.50883 104.91183 +161.57272 110.26524 +165.5887 111.73885 +169.79231 112.92709 +174.47914 111.26287 +178.89832 109.82341 +147.75531 84.50544 +151.23106 81.83506 +155.93619 81.721588 +160.43626 85.227837 +156.02707 85.751213 +151.20631 85.890144 +180.62949 84.736366 +184.85313 81.426796 +189.57396 81.218529 +193.62616 83.520905 +189.93019 85.17701 +185.26933 85.270317 +157.39792 123.27344 +162.35428 122.18416 +167.24348 121.3379 +171.39757 122.47024 +175.48361 121.08866 +181.43904 121.58796 +187.50977 122.46669 +181.88339 126.85819 +176.3239 129.28384 +171.93306 130.0006 +167.53238 129.65094 +162.48717 127.85545 +159.54892 123.70994 +167.44278 124.58755 +171.64078 124.97156 +175.83786 124.21425 +185.24542 122.83739 +175.76491 124.44217 +171.51399 
125.10707 +167.35634 124.75034 +} diff --git a/test/facemark/david2.txt b/test/facemark/david2.txt new file mode 100644 index 000000000..1d6c8bb9f --- /dev/null +++ b/test/facemark/david2.txt @@ -0,0 +1,69 @@ +C:\Users\Amro\Desktop\mexopencv\test\facemark\david2.jpg +138.01855 , 89.154579 +139.21136 , 99.620071 +141.09622 , 109.68665 +143.36954 , 119.3373 +147.22928 , 127.61332 +152.57074 , 135.18625 +158.65134 , 141.56909 +165.28262 , 146.72121 +174.01408 , 148.01602 +183.21277 , 146.32979 +190.60913 , 140.83598 +197.43958 , 133.6351 +202.74667 , 124.8311 +206.16403 , 115.28817 +207.67905 , 104.79993 +208.76697 , 94.739845 +209.33478 , 84.082756 +140.5466 , 79.584969 +143.79265 , 74.090782 +149.96266 , 72.06385 +156.88336 , 72.542412 +163.54131 , 74.896339 +173.25909 , 74.705627 +180.40253 , 72.099586 +188.18176 , 71.668297 +195.93837 , 73.45594 +201.47372 , 78.314529 +169.05841 , 82.679474 +168.88893 , 90.1269 +168.66153 , 97.418312 +168.50883 , 104.91183 +161.57272 , 110.26524 +165.5887 , 111.73885 +169.79231 , 112.92709 +174.47914 , 111.26287 +178.89832 , 109.82341 +147.75531 , 84.50544 +151.23106 , 81.83506 +155.93619 , 81.721588 +160.43626 , 85.227837 +156.02707 , 85.751213 +151.20631 , 85.890144 +180.62949 , 84.736366 +184.85313 , 81.426796 +189.57396 , 81.218529 +193.62616 , 83.520905 +189.93019 , 85.17701 +185.26933 , 85.270317 +157.39792 , 123.27344 +162.35428 , 122.18416 +167.24348 , 121.3379 +171.39757 , 122.47024 +175.48361 , 121.08866 +181.43904 , 121.58796 +187.50977 , 122.46669 +181.88339 , 126.85819 +176.3239 , 129.28384 +171.93306 , 130.0006 +167.53238 , 129.65094 +162.48717 , 127.85545 +159.54892 , 123.70994 +167.44278 , 124.58755 +171.64078 , 124.97156 +175.83786 , 124.21425 +185.24542 , 122.83739 +175.76491 , 124.44217 +171.51399 , 125.10707 +167.35634 , 124.75034 diff --git a/test/facemark/images.txt b/test/facemark/images.txt new file mode 100644 index 000000000..38447a6df --- /dev/null +++ b/test/facemark/images.txt @@ -0,0 +1,2 @@ +C:\Users\Amro\Desktop\mexopencv\test\facemark\david1.jpg +C:\Users\Amro\Desktop\mexopencv\test\facemark\david2.jpg diff --git a/test/facemark/points.txt b/test/facemark/points.txt new file mode 100644 index 000000000..d91861ee5 --- /dev/null +++ b/test/facemark/points.txt @@ -0,0 +1,2 @@ +C:\Users\Amro\Desktop\mexopencv\test\facemark\david1.jpg 122.19988 82.360611 123.93513 91.120468 126.51508 99.197578 129.22067 106.98578 133.19902 113.7834 138.84662 119.23383 145.27563 123.58327 152.18651 126.66142 159.3907 127.08925 165.91125 125.07861 170.39577 120.13544 174.49838 114.5818 177.66217 108.06754 179.41394 101.02079 179.97311 93.52005 180.39978 85.757675 180.10844 77.247322 128.72052 77.144432 133.13686 73.62281 139.00037 71.75547 145.08505 71.452034 151.24992 72.816711 160.14369 72.41198 164.71223 70.382805 169.64963 69.412865 174.70322 69.76152 178.02956 72.847336 156.21767 79.20623 157.11653 85.211922 158.01143 91.190155 158.92986 97.330559 151.75746 100.47115 154.97733 101.49317 158.52144 102.36021 161.64905 100.8894 164.51216 99.106064 136.55087 80.283745 140.01184 78.368507 143.979 77.924759 147.72862 80.49395 144.18651 81.212997 140.20213 81.41494 162.34778 79.328735 165.61121 76.311752 169.36847 75.820404 172.47527 77.276268 169.74754 79.076126 166.05165 79.389473 146.23233 109.6566 150.98267 108.45235 155.3127 107.40239 158.98721 108.04584 162.16663 107.04091 165.52028 107.39285 169.00368 107.97984 165.82381 111.13718 162.50626 112.89403 159.13608 113.43126 155.46181 113.32806 151.16457 112.3426 147.97754 109.61343 155.43669 
109.46209 159.06902 109.723 162.33653 109.12227 167.43648 108.35755 162.22336 109.49796 158.96156 110.09193 155.33214 110.02882 +C:\Users\Amro\Desktop\mexopencv\test\facemark\david2.jpg 138.01855 89.154579 139.21136 99.620071 141.09622 109.68665 143.36954 119.3373 147.22928 127.61332 152.57074 135.18625 158.65134 141.56909 165.28262 146.72121 174.01408 148.01602 183.21277 146.32979 190.60913 140.83598 197.43958 133.6351 202.74667 124.8311 206.16403 115.28817 207.67905 104.79993 208.76697 94.739845 209.33478 84.082756 140.5466 79.584969 143.79265 74.090782 149.96266 72.06385 156.88336 72.542412 163.54131 74.896339 173.25909 74.705627 180.40253 72.099586 188.18176 71.668297 195.93837 73.45594 201.47372 78.314529 169.05841 82.679474 168.88893 90.1269 168.66153 97.418312 168.50883 104.91183 161.57272 110.26524 165.5887 111.73885 169.79231 112.92709 174.47914 111.26287 178.89832 109.82341 147.75531 84.50544 151.23106 81.83506 155.93619 81.721588 160.43626 85.227837 156.02707 85.751213 151.20631 85.890144 180.62949 84.736366 184.85313 81.426796 189.57396 81.218529 193.62616 83.520905 189.93019 85.17701 185.26933 85.270317 157.39792 123.27344 162.35428 122.18416 167.24348 121.3379 171.39757 122.47024 175.48361 121.08866 181.43904 121.58796 187.50977 122.46669 181.88339 126.85819 176.3239 129.28384 171.93306 130.0006 167.53238 129.65094 162.48717 127.85545 159.54892 123.70994 167.44278 124.58755 171.64078 124.97156 175.83786 124.21425 185.24542 122.83739 175.76491 124.44217 171.51399 125.10707 167.35634 124.75034 From 80507425a0db7d2758d29dab738a64f59b127aa3 Mon Sep 17 00:00:00 2001 From: Amro Date: Sat, 3 Feb 2018 14:30:42 +0200 Subject: [PATCH 27/36] samples: update colormaps demo add random user-defined colormap --- samples/falsecolor_demo_gui.m | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/samples/falsecolor_demo_gui.m b/samples/falsecolor_demo_gui.m index 5ccc954b8..9287ae3ec 100644 --- a/samples/falsecolor_demo_gui.m +++ b/samples/falsecolor_demo_gui.m @@ -4,12 +4,13 @@ % % Sources: % -% * +% * % function varargout = falsecolor_demo_gui(im) % load source image if nargin < 1 + %{ if ~mexopencv.isOctave() S = load(which('penny.mat')); img = cv.resize(uint8(S.P), 4, 4); @@ -20,6 +21,7 @@ im = fullfile(mexopencv.root(), 'test', 'HappyFish.jpg'); img = cv.imread(im, 'Grayscale',true); end + %} img = createRandomImage(); elseif ischar(im) img = cv.imread(im, 'Grayscale',true); @@ -82,6 +84,8 @@ function showColormaps(~,~,h) for i=1:N if strcmp(cmaps{i}, 'Gray') out{i} = cv.cvtColor(gray, 'GRAY2RGB'); + elseif strcmp(cmaps{i}, 'Random') + out{i} = cv.applyColorMap(gray, h.user); else out{i} = cv.applyColorMap(gray, cmaps{i}); end @@ -100,6 +104,8 @@ function onChange(~,~,h) idx = get(h.pop, 'Value'); if strcmp(cmaps{idx}, 'Gray') out = cv.cvtColor(h.gray, 'GRAY2RGB'); + elseif strcmp(cmaps{idx}, 'Random') + out = cv.applyColorMap(h.gray, h.user); else out = cv.applyColorMap(h.gray, cmaps{idx}); end @@ -114,12 +120,14 @@ function onChange(~,~,h) % params cmaps = {'Gray', 'Autumn', 'Bone', 'Cool', 'Hot', 'Ocean', 'Parula', ... - 'Pink', 'Spring', 'Summer', 'Winter', 'Jet', 'Rainbow', 'HSV'}; + 'Pink', 'Spring', 'Summer', 'Winter', 'Jet', 'Rainbow', 'HSV', ... + 'Random'}; sz = size(img); % build the user interface (no resizing to keep it simple) h = struct(); h.gray = img; + h.user = randi(255, [256 1 3], 'uint8'); % user-defined random colormap h.fig = figure('Name','Colormap Demo', ... 'NumberTitle','off', 'Menubar','none', 'Resize','off', ... 
'Position',[200 200 sz(2) sz(1)+29]); From c761f132a7b47dd6dce51d6038407c2b44b4be5f Mon Sep 17 00:00:00 2001 From: Amro Date: Sat, 3 Feb 2018 14:42:42 +0200 Subject: [PATCH 28/36] samples: update saliency demo - boxes should be sorted by objectness - better visualization of boxes --- opencv_contrib/samples/computeSaliency_demo.m | 36 ++++++++++--------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/opencv_contrib/samples/computeSaliency_demo.m b/opencv_contrib/samples/computeSaliency_demo.m index e93990edf..50127b52d 100644 --- a/opencv_contrib/samples/computeSaliency_demo.m +++ b/opencv_contrib/samples/computeSaliency_demo.m @@ -3,7 +3,7 @@ % % Sources: % -% * +% * % %% Options @@ -124,28 +124,32 @@ objectnessBoundingBox = saliency.computeSaliency(frame); objectnessValues = saliency.getObjectnessValues(); toc - disp('Objectness done'); + fprintf('Objectness done. ndet = %d\n', numel(objectnessBoundingBox)); dir(fullfile(tempdir(),'Results')) %TODO: poor bounding boxes, are they ordered correctly? - %{ - % sort by values - [objectnessValues, idx] = sort(objectnessValues, 'descend'); - objectnessBoundingBox = objectnessBoundingBox(idx); - % keep best K - num = 10; - objectnessValues(num+1:end) = []; - objectnessBoundingBox(num+1:end) = []; - %} + if false + % sort by values (ascending or descending ?) + [objectnessValues, idx] = sort(objectnessValues, 'descend'); + objectnessBoundingBox = objectnessBoundingBox(idx); + end % plot bounding boxes around possible objects - for i=1:min(10, numel(objectnessBoundingBox)) - clr = randi([0 255], [1 3]); + % (results are sorted by objectness, we use the first few boxes here) + maxd = 7; + clr = round(255 * lines(maxd)); + for i=1:min(maxd, numel(objectnessBoundingBox)) bb = objectnessBoundingBox{i}; val = objectnessValues(i); - frame = cv.rectangle(frame, bb(1:2), bb(3:4), 'Color',clr); - frame = cv.putText(frame, num2str(val), bb(1:2)-[0 2], ... - 'Color',clr, 'FontScale',0.5); + % add jitter to seperate single rects + off = rand(1,2) * 2 * 9 - 9; + frame = cv.rectangle(frame, bb(1:2)+off, bb(3:4)+off, ... + 'Color',clr(i,:), 'Thickness',2); + frame = cv.putText(frame, num2str(val), bb(1:2)+off+[2 -3], ... + 'Color',clr(i,:), 'FontScale',0.5); + % mini temperature scale + frame = cv.rectangle(frame, [20 20+(i-1)*10 10 10], ... + 'Color',clr(i,:), 'Thickness',-1); end imshow(frame), title('Objectness') end From c0e3f0189fe77a244142395254926dca367e21eb Mon Sep 17 00:00:00 2001 From: Amro Date: Sat, 3 Feb 2018 15:13:26 +0200 Subject: [PATCH 29/36] samples: new kaleidoscope reflections demo --- samples/kaleidoscope_demo.m | 203 ++++++++++++++++++++++++++++++++++++ 1 file changed, 203 insertions(+) create mode 100644 samples/kaleidoscope_demo.m diff --git a/samples/kaleidoscope_demo.m b/samples/kaleidoscope_demo.m new file mode 100644 index 000000000..379d75a32 --- /dev/null +++ b/samples/kaleidoscope_demo.m @@ -0,0 +1,203 @@ +%% Kaleidoscopic Reflections +% +% Inspired by: +% +% + +function kaleidoscope_demo(k, iters, doImg) + % kaleidoscope options + if nargin < 1, k = 4; end % n-folds (n=4*k) + if nargin < 2, iters = 1; end % recursive level: 1, 2, .. 
+ if nargin < 3, doImg = true; end + validateattributes(k, {'numeric'}, {'scalar', 'integer', '>=',1, '<=',4}, 1); + validateattributes(iters, {'numeric'}, {'scalar', 'integer', 'positive'}, 2); + validateattributes(doImg, {'numeric', 'logical'}, {'scalar', 'binary'}, 3); + kaleidoscope = @(img) kaleidoscope_wrapper(img, k, iters); + + % run demo + if doImg + %img = imread(which('peppers.png')); + %img = imread(fullfile(mexopencv.root(),'test','lena.jpg')); + img = imread(fullfile(mexopencv.root(),'test','fruits.jpg')); + demo_image(kaleidoscope, img); + else + cap = cv.VideoCapture(0); + assert(cap.isOpened()); + demo_video(kaleidoscope, cap); + cap.release(); + end +end + +function demo_image(kaleidoscope, img) + % input image must be square-sized + sz = 400; + img = cv.resize(img, [sz sz]); + + % animation by repeating a number of times + hImg = imshow(img); + for i=1:81 % sz/(10/2)+1 + % each time offset the input image with wrap-around padding + of = (i-1)*10; + if true + im = cv.copyMakeBorder(img, [of 0 0 of], 'BorderType','Reflect101'); + im = im(1:end-of,of+1:end,:); + else + im = circshift(img, [of -of]); + end + + % show kaleidoscope effect + out = kaleidoscope(im); + set(hImg, 'CData',out) + drawnow + end +end + +function demo_video(kaleidoscope, cap) + % video stream + sz = 400; + img = cap.read(); + assert(~isempty(img)); + img = cv.resize(img, [sz sz]); + + hImg = imshow(img); + while ishghandle(hImg) + % grab new frame + img = cap.read(); + if isempty(img), break; end + img = cv.resize(img, [sz sz]); + + % show kaleidoscope effect + out = kaleidoscope(img); + set(hImg, 'CData',out) + drawnow + end +end + +%% Kaleidoscope functions + +function out = kaleidoscope_4folds(img, flag) + % take full image (pattern fills a square) + out = img; + + % tile (mirror and repeat) this pattern 4 times to get 4-fold symmetry + if nargin < 2 || flag, out = cv.rotate(out, '180'); end + out = image_tile(out); +end + +function out = kaleidoscope_8folds(img, flag) + % extract wedge (divide square into 2 folds, take top one) + sz = size(img, 1); % assumes a square image + if true + mask = zeros(sz, 'uint8'); + mask = cv.fillConvexPoly(mask, [0 0; sz sz; sz 0], 'Color',255); + else + mask = triu(ones(sz, 'uint8') * 255); + end + q1 = image_crop(img, mask); + + % create other wedge (transpose and h-flip) + q2 = cv.flip(cv.rotate(q1, '90CW'), 1); + + % merge the two wedges (pattern fills a square) + out = image_merge(q1, q2); + + % tile (mirror and repeat) this pattern 4 times to get 8-fold symmetry + if nargin < 2 || flag, out = cv.rotate(out, '180'); end + out = image_tile(out); +end + +function out = kaleidoscope_12folds(img, flag) + % extract wedge (divide square into 3 folds, take middle one) + sz = size(img, 1); % assumes a square image + s = round(sz * tand(30)); + mask = zeros(sz, 'uint8'); + mask = cv.fillConvexPoly(mask, [0 0; s sz; sz sz; sz s], 'Color',255); + q1 = image_crop(img, mask); + + % create other two wedges (transpose, h-flip, and rotate +/- 30 degrees) + q = cv.flip(cv.rotate(q1, '90CW'), 1); + q2 = image_rotate(q, -30); + q3 = image_rotate(q, +30); + + % merge the three wedges (pattern fills a square) + out = image_merge(image_merge(q1, q2), q3); + + % tile (mirror and repeat) this pattern 4 times to get 12-fold symmetry + if nargin < 2 || flag, out = cv.rotate(out, '180'); end + out = image_tile(out); +end + +function out = kaleidoscope_16folds(img, flag) + % extract wedge (divide square into 4 folds, take middle-top one) + sz = size(img, 1); % assumes a square image + s = 
round(sz * tand(22.5)); + mask = zeros(sz, 'uint8'); + mask = cv.fillConvexPoly(mask, [0 0; sz sz; sz s], 'Color',255); + q1 = image_crop(img, mask); + + % create bottom wedge (rotate 45 degrees), and merge + q2 = image_rotate(q1, -45); + q12 = image_merge(q1, q2); + + % create other two wedges (transpose and h-flip), and merge + q34 = cv.flip(cv.rotate(q12, '90CW'), 1); + out = image_merge(q12, q34); + + % tile (mirror and repeat) this pattern 4 times to get 16-fold symmetry + if nargin < 2 || flag, out = cv.rotate(out, '180'); end + out = image_tile(out); +end + +%% Helper functions + +function out = kaleidoscope_wrapper(img, k, iters) + switch k + case 1 + kaleidoscope_nfolds = @kaleidoscope_4folds; + case 2 + kaleidoscope_nfolds = @kaleidoscope_8folds; + case 3 + kaleidoscope_nfolds = @kaleidoscope_12folds; + case 4 + kaleidoscope_nfolds = @kaleidoscope_16folds; + end + + % build kaleidoscope + out = kaleidoscope_nfolds(img); + for n=2:iters + % apply recursively for more fun + out = kaleidoscope_nfolds(out, false); + end +end + +function out = image_crop(img, mask) + if true + out = cv.copyTo(img, 'Mask',mask); + elseif true + out = cv.bitwise_and(img, uint8(255), 'Mask',mask); + else + out = bsxfun(@times, img, uint8(mask~=0)); + end +end + +function out = image_rotate(img, angl) + T = cv.getRotationMatrix2D([0 0], angl, 1); + out = cv.warpAffine(img, T); +end + +function out = image_merge(img1, img2) + if true + out = cv.bitwise_or(img1, img2); + elseif true + out = cv.addWeighted(img1,1, img2,1, 0); + else + out = img1 + img2; + end +end + +function out = image_tile(img) + % downsample first so that output has same size as input + out = cv.resize(img, 0.5, 0.5, 'Interpolation','Area'); + out = [out, cv.flip(out, 1)]; + out = [out; cv.flip(out, 0)]; +end From c74211eb70775ad9627ebe0eeea8d6d45b4fba0b Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 4 Feb 2018 16:48:49 +0200 Subject: [PATCH 30/36] dnn: update samples refactoring and minor edits --- samples/caffe_googlenet_demo.m | 10 +- samples/dnn_face_detector.m | 9 +- samples/dnn_image_classification_demo.m | 8 +- samples/dnn_object_detection_demo.m | 138 +++++++++++++++-------- samples/dnn_semantic_segmentation_demo.m | 24 ++-- samples/fcn_semsegm_demo.m | 6 +- 6 files changed, 122 insertions(+), 73 deletions(-) diff --git a/samples/caffe_googlenet_demo.m b/samples/caffe_googlenet_demo.m index 8d8dfb518..9c8fae6fe 100644 --- a/samples/caffe_googlenet_demo.m +++ b/samples/caffe_googlenet_demo.m @@ -7,9 +7,9 @@ % % Sources: % -% * -% * -% * +% * +% * +% * % %% BVLC GoogLeNet @@ -26,8 +26,8 @@ modelBin = fullfile(dirDNN, 'bvlc_googlenet.caffemodel'); % 51 MB file files = {modelLabels, modelTxt, modelBin}; urls = { - 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/synset_words.txt'; - 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/bvlc_googlenet.prototxt'; + 'https://cdn.rawgit.com/opencv/opencv/3.4.0/samples/data/dnn/synset_words.txt'; + 'https://cdn.rawgit.com/opencv/opencv/3.4.0/samples/data/dnn/bvlc_googlenet.prototxt'; 'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel'; }; if ~isdir(dirDNN), mkdir(dirDNN); end diff --git a/samples/dnn_face_detector.m b/samples/dnn_face_detector.m index 3e2753246..e70384328 100644 --- a/samples/dnn_face_detector.m +++ b/samples/dnn_face_detector.m @@ -4,7 +4,8 @@ % % Sources: % -% * +% * +% * % %% @@ -86,13 +87,13 @@ function [net, blobOpts] = ResNetSSD_FaceDetector() %RESNETSSD_FACEDETECTOR face detector based on SSD framework with reduced ResNet-10 backbone % 
- % homepage = https://github.com/opencv/opencv/blob/3.3.1/samples/dnn/face_detector/how_to_train_face_detector.txt + % homepage = https://github.com/opencv/opencv/blob/3.4.0/samples/dnn/face_detector/how_to_train_face_detector.txt % % ## Model % % file = test/dnn/ResNetSSD_FaceDetector/deploy.prototxt - % url = https://github.com/opencv/opencv/raw/3.3.1/samples/dnn/face_detector/deploy.prototxt - % hash = 5fd52177a483cbac12fd61e9ecd87c762829ecbe + % url = https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt + % hash = 006BAF926232DF6F6332DEFB9C24F94BB9F3764E % % ## Weights % diff --git a/samples/dnn_image_classification_demo.m b/samples/dnn_image_classification_demo.m index 2ff704df4..e2e4816e5 100644 --- a/samples/dnn_image_classification_demo.m +++ b/samples/dnn_image_classification_demo.m @@ -10,10 +10,10 @@ % % Sources: % -% * -% * -% * -% * +% * +% * +% * +% * % function dnn_image_classification_demo(im, name, crop) diff --git a/samples/dnn_object_detection_demo.m b/samples/dnn_object_detection_demo.m index d2fcd496e..e8f36f107 100644 --- a/samples/dnn_object_detection_demo.m +++ b/samples/dnn_object_detection_demo.m @@ -1,7 +1,7 @@ %% DNN Object Detection % -% This sample uses Single-Shot Detector (SSD) or You Only Look Once (YOLO) to -% detect objects on image (produces bounding boxes and corresponding labels). +% This sample uses DNN to detect objects on image (produces bounding boxes and +% corresponding labels), using different methods: % % * % * @@ -12,11 +12,12 @@ % % Sources: % -% * -% * -% * -% * -% * +% * +% * +% * +% * +% * +% * % function dnn_object_detection_demo(im, name, crop, min_conf) @@ -50,7 +51,8 @@ function dnn_object_detection_demo(im, name, crop, min_conf) blobOpts = ['Crop',crop, blobOpts]; opts = parseBlobOpts(blobOpts{:}); blob = cv.Net.blobFromImages(img, blobOpts{:}); - net.setInput(blob); + net.setInput(blob); % net.setInput(blob, 'data'); + isz = [opts.Size(1), opts.Size(2)]; % run forward pass fprintf('Forward pass... 
'); tic; @@ -58,26 +60,13 @@ function dnn_object_detection_demo(im, name, crop, min_conf) toc; % prepare output image - if opts.Crop - % center cropped as fed to network - out = cropImage(img, opts); - else - if false - % resized image (squashed) as fed to network - out = imageFromBlob(blob, opts); - else - % unmodified original image - out = img; - end - end - out = flip(out, 3); % BGR to RGB - - % build detections struct (adjust relative bounding boxes to image size) - detections = processOutput(detections, name, [size(out,2) size(out,1)]); + out = outputImage(img, blob, opts); + osz = [size(out,2) size(out,1)]; - % filter-out weak detections according to a minimum confidence threshold + % build detections struct, keeping only strong detections + % (according to a minimum confidence threshold) if nargin < 4, min_conf = 0.2; end - detections = detections([detections.confidence] >= min_conf); + detections = processOutput(detections, name, isz, osz, min_conf); % localization: show bounding boxes for i=1:numel(detections) @@ -200,37 +189,92 @@ function dnn_object_detection_demo(im, name, crop, min_conf) end end -function detections = processOutput(output, name, sz) - isYOLO = strcmpi(name, 'yolo'); - if isYOLO +function out = outputImage(img, blob, opts) + if opts.Crop + % center cropped as fed to network + out = cropImage(img, opts); + else + if false + % resized image (squashed) as fed to network + out = imageFromBlob(blob, opts); + else + % unmodified original image + out = img; + end + end + out = flip(out, 3); % BGR to RGB +end + +function S = processOutput(output, name, isz, osz, thresh) + %PROCESSOUTPUT Process output into detections structure + % + % S = processOutput(output, name, isz, osz, thresh) + % + % ## Input + % * __output__ network output blob + % * __name__ network model name + % * __isz__ size of input image blob `[w,h]` + % * __osz__ size of output image where detections are drawn `[w,h]` + % * __thresh__ minimum confidence threshold + % + % ## Output + % * __S__ struct-array of detections, with the following fields: + % * **img_id** index of image + % * **class_id** index of class + % * __confidence__ detection confidence + % * __rect__ detection bounding box `[x,y,w,h]` + % + + % reshape and adjust coordinates when necessary + if strcmpi(name, 'yolo') % YOLO output is already ndetections-by-25-by-1-by-1 % (20+5 for VOC, 80+5 for COCO) + % (center_x, center_y, width, height, unused_t0, probability_for_each_class[20]) + + % adjust relative coordinates to image size + output(:,1:4) = bsxfun(@times, output(:,1:4), [osz osz]); + + % test predictions confidence against threshold + idx = max(output(:,6:end), [], 2) > thresh; else % SSD output is 1-by-1-by-ndetections-by-7 + % (img_id, class_id, confidence, left, bottom, right, top) output = permute(output, [3 4 2 1]); + + % adjust relative coordinates to image size + output(:,4:7) = bsxfun(@times, output(:,4:7), [osz osz]); + + % test predictions confidence against threshold + idx = output(:,3) > thresh; end + + % filter out weak detections + output = output(idx,:); num = size(output,1); - % note: bounding boxes returned are percentages relative to image size - detections = struct('img_id',[], 'class_id',[], 'confidence',[], 'rect',[]); - detections = repmat(detections, num, 1); + % detections struct-array + S = struct('img_id',[], 'class_id',[], 'confidence',[], 'rect',[]); + S = repmat(S, num, 1); for i=1:num - if isYOLO - % (center_x, center_y, width, height, unused_t0, probability_for_each_class[20]) - rrect = 
struct('center',output(i,1:2) .* sz, ... - 'size',output(i,3:4) .* sz, 'angle',0); - detections(i).rect = cv.RotatedRect.boundingRect(rrect); - [detections(i).confidence, detections(i).class_id] = max(output(i,6:end)); - detections(i).img_id = 0; + if strcmpi(name, 'yolo') + S(i).img_id = 0; + [S(i).confidence, S(i).class_id] = max(output(i,6:end)); + rect = cv.RotatedRect.boundingRect(struct(... + 'center',output(i,1:2), 'size',output(i,3:4), 'angle',0)); else - % (img_id, class_id, confidence, xLeftBottom, yLeftBottom, xRightTop, yRightTop) - detections(i).img_id = output(i,1); - detections(i).class_id = output(i,2); - detections(i).confidence = output(i,3); - detections(i).rect = round(cv.Rect.from2points(... - output(i,4:5) .* sz, output(i,6:7) .* sz)); + S(i).img_id = output(i,1); + S(i).class_id = output(i,2); + S(i).confidence = output(i,3); + rect = cv.Rect.from2points(output(i,4:5), output(i,6:7)); end + % clamp coordinates + rect = cv.Rect.intersect(rect, [0 0 osz]); + S(i).rect = round(rect); end + + % remove small detections (and out-of-bound rects after clamping) + idx = cv.Rect.area(cat(1, S.rect)) > 5; + S = S(idx); end function img = insertAnnotation(img, rect, str, varargin) @@ -326,7 +370,6 @@ function dnn_object_detection_demo(im, name, crop, min_conf) % (VOC: 20 classes, ILSVRC: 200 classes, http://image-net.org/challenges/LSVRC/2016/browse-det-synsets) imageset = validatestring(imageset, {'VOC', 'ILSVRC'}); dname = get_dnn_dir(fullfile('VGGNetSSD', imageset)); - blobOpts = {'SwapRB',false, 'Size',[300 300], 'Mean',[104 117 123]}; if strcmp(imageset, 'VOC') net = cv.Net('Caffe', ... fullfile(dname, 'deploy.prototxt'), ... @@ -334,10 +377,11 @@ function dnn_object_detection_demo(im, name, crop, min_conf) labels = readLabelsColors(fullfile(dname, 'pascal-classes.txt'), false); else net = cv.Net('Caffe', ... - fullfile(dname, 'ssd_vgg16.prototxt'), ... + fullfile(dname, 'deploy.prototxt'), ... 
fullfile(dname, 'VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel')); labels = readLabelsProtoTxt(fullfile(dname, 'labelmap_ilsvrc_det.prototxt'), false); end + blobOpts = {'SwapRB',false, 'Size',[300 300], 'Mean',[104 117 123]}; end function [net, labels, blobOpts] = MobileNetSSD(imageset) diff --git a/samples/dnn_semantic_segmentation_demo.m b/samples/dnn_semantic_segmentation_demo.m index 69cdd4dd1..ed20d625b 100644 --- a/samples/dnn_semantic_segmentation_demo.m +++ b/samples/dnn_semantic_segmentation_demo.m @@ -8,8 +8,8 @@ % % Sources: % -% * -% * +% * +% * % function dnn_semantic_segmentation_demo(im, name, crop) @@ -48,14 +48,7 @@ function dnn_semantic_segmentation_demo(im, name, crop) toc; % prepare output image - if opts.Crop - % center cropped as fed to network - out = cropImage(img, opts); - else - % resized image (squashed) as fed to network - out = imageFromBlob(blob, opts); - end - out = flip(out, 3); % BGR to RGB + out = outputImage(img, blob, opts); % pixel-wise segmentation (predict class with max score) score = permute(score, [3 4 2 1]); % H-by-W-by-nclasses @@ -157,6 +150,17 @@ function dnn_semantic_segmentation_demo(im, name, crop) end end +function out = outputImage(img, blob, opts) + if opts.Crop + % center cropped as fed to network + out = cropImage(img, opts); + else + % resized image (squashed) as fed to network + out = imageFromBlob(blob, opts); + end + out = flip(out, 3); % BGR to RGB +end + function img = createLabelsLegend(labels) img = cell(numel(labels),1); for i=1:numel(labels) diff --git a/samples/fcn_semsegm_demo.m b/samples/fcn_semsegm_demo.m index f320e28be..f109834b4 100644 --- a/samples/fcn_semsegm_demo.m +++ b/samples/fcn_semsegm_demo.m @@ -6,7 +6,7 @@ % % Sources: % -% * +% * % %% Model files @@ -16,8 +16,8 @@ modelBin = fullfile(dirDNN, 'fcn8s-heavy-pascal.caffemodel'); % 513 MB file files = {modelLabels, modelTxt, modelBin}; urls = { - 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/pascal-classes.txt'; - 'https://cdn.rawgit.com/opencv/opencv/3.3.1/samples/data/dnn/fcn8s-heavy-pascal.prototxt'; + 'https://cdn.rawgit.com/opencv/opencv/3.4.0/samples/data/dnn/pascal-classes.txt'; + 'https://cdn.rawgit.com/opencv/opencv/3.4.0/samples/data/dnn/fcn8s-heavy-pascal.prototxt'; 'http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel'; }; if ~isdir(dirDNN), mkdir(dirDNN); end From c9e9e17ca863e6f45efa65aee5fed250a4bd9b27 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 4 Feb 2018 17:26:35 +0200 Subject: [PATCH 31/36] dnn: update objection sample opencv added support for Faster-RCNN and R-FCN models --- samples/dnn_object_detection_demo.m | 161 +++++++++++++++++++++++++++- 1 file changed, 159 insertions(+), 2 deletions(-) diff --git a/samples/dnn_object_detection_demo.m b/samples/dnn_object_detection_demo.m index e8f36f107..40cdd7078 100644 --- a/samples/dnn_object_detection_demo.m +++ b/samples/dnn_object_detection_demo.m @@ -9,6 +9,7 @@ % * % * % * +% * % % Sources: % @@ -18,6 +19,7 @@ % * % * % * +% * % function dnn_object_detection_demo(im, name, crop, min_conf) @@ -40,12 +42,29 @@ function dnn_object_detection_demo(im, name, crop, min_conf) case 'yolo' % PASCAL VOC or Microsoft COCO [net, labels, blobOpts] = YOLO('VOC', true); + case 'fasterrcnn' + % PASCAL VOC + [net, labels, blobOpts] = FasterRCNN('VGG'); + case 'rfcn' + % PASCAL VOC + [net, labels, blobOpts] = RFCN(); otherwise error('Unrecognized model %s', name) end toc; assert(~net.empty(), 'Failed to read network %s', name); + % determine blob size for region-based networks + 
if any(strcmpi(name, {'fasterrcnn', 'rfcn'})) + % compute blob shape from image size, and feed it to the network, + % this determines scale of coordinates in detections + im_info = rpn_image_info([size(img,1) size(img,2)]); + net.setInput(single(im_info), 'im_info'); % [h,w,scale] + + % set blob size option + blobOpts = [blobOpts, 'Size',fliplr(im_info(1:2))]; % [w,h] + end + % feed image to network if nargin < 3, crop = false; end blobOpts = ['Crop',crop, blobOpts]; @@ -65,7 +84,7 @@ function dnn_object_detection_demo(im, name, crop, min_conf) % build detections struct, keeping only strong detections % (according to a minimum confidence threshold) - if nargin < 4, min_conf = 0.2; end + if nargin < 4, min_conf = 0.2; end % 0.8 detections = processOutput(detections, name, isz, osz, min_conf); % localization: show bounding boxes @@ -205,6 +224,42 @@ function dnn_object_detection_demo(im, name, crop, min_conf) out = flip(out, 3); % BGR to RGB end +function im_info = rpn_image_info(sz) + %RPN_IMAGE_INFO Calculate blob shape + % + % im_info = rpn_image_info(sz) + % + % ## Input + % * **sz** image size `[h,w]` + % + % ## Output + % * **im_info** blob shape info `[h,w,scale]` + % + + if true + target_size = 600; % pixel size of input image shortest side + max_size = 1000; % max pixel size of scaled input image longest side + + % re-scale image such that its shorter side is 600 pixels, + % it may be less than 600 as we cap the longest side at 1000 pixels + % and maintain image's aspect ratio + im_scale = target_size / min(sz); + if round(im_scale * max(sz)) > max_size + im_scale = max_size / max(sz); + end + im_size = round(sz * im_scale); + elseif true + % keep size as is + im_size = sz; + im_scale = 1; + else + im_size = [600, 800]; + im_scale = 1.6; + %im_scale = im_size(1) / sz(1); + end + im_info = [im_size, im_scale]; +end + function S = processOutput(output, name, isz, osz, thresh) %PROCESSOUTPUT Process output into detections structure % @@ -237,10 +292,18 @@ function dnn_object_detection_demo(im, name, crop, min_conf) % test predictions confidence against threshold idx = max(output(:,6:end), [], 2) > thresh; else - % SSD output is 1-by-1-by-ndetections-by-7 + % SSD/region-proposal output is 1-by-1-by-ndetections-by-7 % (img_id, class_id, confidence, left, bottom, right, top) output = permute(output, [3 4 2 1]); + % unify cases by always having coordinates relative to image size + if any(strcmpi(name, {'fasterrcnn', 'rfcn'})) + output(:,4:7) = bsxfun(@rdivide, output(:,4:7), [isz isz]); + end + + % clamp coordinates + %output(:,4:7) = min(max(output(:,4:7), 0), 1); + % adjust relative coordinates to image size output(:,4:7) = bsxfun(@times, output(:,4:7), [osz osz]); @@ -546,3 +609,97 @@ function dnn_object_detection_demo(im, name, crop, min_conf) labels = readLabels(fullfile(dname, [lower(imageset) '.names']), true); blobOpts = {'SwapRB',false, 'Size',[416 416], 'ScaleFactor',1/255}; end + +function [net, labels, blobOpts] = FasterRCNN(m) + %FASTERRCNN Faster Region-based Convolutional Neural Networks (Faster R-CNN) + % + % homepage = https://github.com/rbgirshick/py-faster-rcnn + % + % # Faster R-CNN, VGG16, PASCAL VOC 2007 [Caffe] + % + % ## Model + % + % file = test/dnn/FasterRCNN/faster_rcnn_vgg16.prototxt + % url = https://github.com/opencv/opencv_extra/raw/3.4.0/testdata/dnn/faster_rcnn_vgg16.prototxt + % hash = 40be8a41a6d5a16adf65b39f9a3aa2940b21d6bf + % + % ## Weights + % + % file = test/dnn/FasterRCNN/VGG16_faster_rcnn_final.caffemodel + % url = 
http://www.cs.berkeley.edu/~rbg/faster-rcnn-data/faster_rcnn_models.tgz + % url = https://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz + % hash = 51bca62727c3fe5d14b66e9331373c1e297df7d1 + % size = 694 MB + % + % # Faster R-CNN, ZF, PASCAL VOC 2007 [Caffe] + % + % ## Model + % + % file = test/dnn/FasterRCNN/faster_rcnn_zf.prototxt + % url = https://github.com/opencv/opencv_extra/raw/3.4.0/testdata/dnn/faster_rcnn_zf.prototxt + % hash = 373c358b22550fb2a89ed10ba41bbce8abd0e3ff + % + % ## Weights + % + % file = test/dnn/FasterRCNN/ZF_faster_rcnn_final.caffemodel + % url = http://www.cs.berkeley.edu/~rbg/faster-rcnn-data/faster_rcnn_models.tgz + % url = https://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz + % hash = 51bca62727c3fe5d14b66e9331373c1e297df7d1 + % size = 694 MB + % + % # Classes + % + % file = test/dnn/FasterRCNN/pascal-classes.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/pascal-classes.txt + % + + dname = get_dnn_dir('FasterRCNN'); + m = validatestring(m, {'VGG', 'ZF'}); + if strcmp(m, 'VGG') + net = cv.Net('Caffe', ... + fullfile(dname, 'faster_rcnn_vgg16.prototxt'), ... + fullfile(dname, 'VGG16_faster_rcnn_final.caffemodel')); + else + net = cv.Net('Caffe', ... + fullfile(dname, 'faster_rcnn_zf.prototxt'), ... + fullfile(dname, 'ZF_faster_rcnn_final.caffemodel')); + end + labels = readLabelsColors(fullfile(dname, 'pascal-classes.txt'), false); + blobOpts = {'SwapRB',false, 'Mean',[102.9801, 115.9465, 122.7717]}; + % 'Size',[800 600] +end + +function [net, labels, blobOpts] = RFCN() + %RFCN Region-based Fully Convolutional Networks (R-FCN) + % + % homepage = https://github.com/YuwenXiong/py-R-FCN + % + % # R-FCN, ResNet-50, PASCAL VOC 07+12 [Caffe] + % + % ## Model + % + % file = test/dnn/RFCN/rfcn_pascal_voc_resnet50.prototxt + % url = https://github.com/opencv/opencv_extra/raw/3.4.0/testdata/dnn/rfcn_pascal_voc_resnet50.prototxt + % hash = 5037174369f202d9d901fa43fc19dca36d0a051c + % + % ## Weights + % + % file = test/dnn/RFCN/resnet50_rfcn_final.caffemodel + % url = https://1drv.ms/u/s!AoN7vygOjLIQqUWHpY67oaC7mopf + % hash = bb3180da68b2b71494f8d3eb8f51b2d47467da3e + % size = 293 MB + % + % ## Classes + % + % file = test/dnn/RFCN/pascal-classes.txt + % url = https://github.com/opencv/opencv/raw/3.3.1/samples/data/dnn/pascal-classes.txt + % + + dname = get_dnn_dir('RFCN'); + net = cv.Net('Caffe', ... + fullfile(dname, 'rfcn_pascal_voc_resnet50.prototxt'), ... 
+ fullfile(dname, 'resnet50_rfcn_final.caffemodel')); + labels = readLabelsColors(fullfile(dname, 'pascal-classes.txt'), false); + blobOpts = {'SwapRB',false, 'Mean',[102.9801, 115.9465, 122.7717]}; + % 'Size',[800 600] +end From 4bd5e43607297ce5b7cb9641e5a93882bb3e1022 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 4 Feb 2018 17:38:05 +0200 Subject: [PATCH 32/36] dnn: update face detection sample refactor detection function --- samples/dnn_face_detector.m | 65 +++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 20 deletions(-) diff --git a/samples/dnn_face_detector.m b/samples/dnn_face_detector.m index e70384328..533b456a7 100644 --- a/samples/dnn_face_detector.m +++ b/samples/dnn_face_detector.m @@ -21,38 +21,28 @@ % prepare video input cap = cv.VideoCapture(); pause(1); -assert(cap.isOpened()); +assert(cap.isOpened(), 'Could not initialize capturing'); %% % prepare figure frame = cap.read(); -assert(~isempty(frame)); +assert(~isempty(frame), 'Could not read frame'); hImg = imshow(frame); -sz = [size(frame,2) size(frame,1)]; %% -% video feed +% main loop over video feed while ishghandle(hImg) % read frame frame = cap.read(); if isempty(frame), break; end % detect faces - net.setInput(cv.Net.blobFromImages(flip(frame,3), blobOpts{:})); - detections = net.forward(); % SSD output is 1-by-1-by-ndetections-by-7 - detections = permute(detections, [3 4 2 1]); - - % draw detections - for i=1:size(detections,1) - % only strong detections - d = detections(i,:); - if d(2) == 1 && d(3) > confThreshold % 0: background, 1: face - % plot bounding boxes (coordinates are relative to image size) - frame = cv.rectangle(frame, d(4:5).*sz, d(6:7).*sz, ... - 'Color',[0 255 0], 'Thickness',2); - frame = cv.putText(frame, sprintf('conf = %3.0f%%', d(3)*100), ... - d(4:5).*sz - [0 4], 'Color',[255 0 0], 'FontScale',0.5); - end + [rects, confs] = detectFaces(frame, net, blobOpts, confThreshold); + for i=1:size(rects,1) + frame = cv.rectangle(frame, rects(i,:), ... + 'Color',[0 255 0], 'Thickness',2); + frame = cv.putText(frame, sprintf('conf = %3.0f%%', confs(i)*100), ... + rects(i,1:2) - [0 4], 'Color',[255 0 0], 'FontScale',0.5); end % show inference timing @@ -68,7 +58,42 @@ cap.release(); %% -% Helper function +% Helper functions + +function [rects, confs] = detectFaces(img, net, blobOpts, thresh) + %DETECTFACES Run face detection network to detect faces on input image + % + % You may play with input blob sizes to balance detection quality and + % efficiency. The bigger input blob the smaller faces may be detected. 
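+    % For instance, a hypothetical call with a larger blob than the 300x300
+    % default returned by ResNetSSD_FaceDetector trades speed for the ability
+    % to pick up smaller faces:
+    %
+    %     blobOpts = {'SwapRB',false, 'Crop',false, 'Size',[600 600], 'Mean',[104 117 123]};
+    %     rects = detectFaces(frame, net, blobOpts, 0.5);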
+ % + + % detect faces + net.setInput(cv.Net.blobFromImages(flip(img,3), blobOpts{:})); + dets = net.forward(); + + % SSD output is 1-by-1-by-ndetections-by-7 + % d = [img_id, class_id, confidence, left, bottom, right, top] + dets = permute(dets, [3 4 2 1]); + + % filter out weak detections + if nargin < 4, thresh = 0.5; end + idx = (dets(:,2) == 1 & dets(:,3) > thresh); % 0: background, 1: face + dets = dets(idx,:); + + % adjust relative coordinates to image size + sz = [size(img,2) size(img,1)]; + dets(:,4:7) = bsxfun(@times, dets(:,4:7), [sz sz]); + + % output detections (clamp coords and remove small and out-of-bound rects) + rects = cv.Rect.from2points(dets(:,4:5), dets(:,6:7)); + rects = cv.Rect.intersect(rects, [0 0 sz]); + idx = (cv.Rect.area(rects) >= 10); + rects = rects(idx,:); + confs = dets(idx,3); +end + +%% +% Pretrained models function dname = get_dnn_dir(dname) %GET_DNN_DIR Path to model files, and show where to get them if missing From 72fafdea22a968fb3878d5c08fc04c646f130c6d Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 4 Feb 2018 17:55:58 +0200 Subject: [PATCH 33/36] dnn: new face recognition sample --- samples/dnn_face_recognition.m | 337 +++++++++++++++++++++++++++++++++ 1 file changed, 337 insertions(+) create mode 100644 samples/dnn_face_recognition.m diff --git a/samples/dnn_face_recognition.m b/samples/dnn_face_recognition.m new file mode 100644 index 000000000..83885dd8b --- /dev/null +++ b/samples/dnn_face_recognition.m @@ -0,0 +1,337 @@ +%% DNN Face Detection and Recognition +% This tutorial will show us how to run deep learning models, with face +% detection and face recognition models pipeline. +% +% Sources: +% +% * +% * +% +%% Face detection +% Face detection network gets BGR image as input and produces set of bounding +% boxes that might contain faces. All that we need is just select the boxes +% with a strong confidence. +% +% Face detector is based on SSD framework (Single Shot MultiBox Detector), +% using a reduced ResNet-10 model. +% +%% Face recognition +% Network is called . Face +% recognition model receives RGB face image of size |96x96|. Then it returns +% 128-dimensional unit vector that represents input face as a point on the +% unit multidimensional sphere. So difference between two faces is an angle +% between two output vectors. +% +%% Code +% Start the demo, then press "Add a person" to name a person that is +% recognized as an unknown one. +% + +%% +% import deep learning models +[netDet, blobDetOpts] = ResNetSSD_FaceDetector(); +[netRec, blobRecOpts] = OpenFace_Embedding(); +assert(~netDet.empty() && ~netRec.empty()); + +%% +% options +confThreshold = 0.5; % minimum confidence for face detection +scoreThreshold = 0.5; % minimum score for face recognition + +%% +% prepare video input +cap = cv.VideoCapture(); +pause(1); +assert(cap.isOpened(), 'Could not initialize capturing'); +frame = cap.read(); +assert(~isempty(frame), 'Could not read frame'); + +%% +% prepare figure +hThumb = []; +hImg = imshow(frame); +hFig = ancestor(hImg, 'figure'); +setappdata(hFig, 'flag',false); +hBtn = uicontrol('Parent',hFig, 'Style','pushbutton', ... + 'Position',[20 20 100 20], 'String','Add a person', ... 
+ 'Callback',@(~,~) setappdata(hFig, 'flag',true)); + +%% +% dataset of extracted faces and corresponding names +vecs = zeros(128,0,'single'); % feature vectors +names = cell(1,0); +faces = zeros([96 96 3 0], 'uint8'); % only needed for visualization + +%% +% main loop that receives a frames from a camera and makes a recognition of +% every detected face on the frame +while ishghandle(hImg) + % read frame + frame = cap.read(); + if isempty(frame), break; end + out = frame; + + % detect faces + rects = detectFaces(frame, netDet, blobDetOpts, confThreshold); + for i=1:size(rects,1) + % preprocess face + rect = rects(i,:); + face = alignFace(cv.Rect.crop(frame, rect)); + + % recognize face + vec = face2vec(face, netRec, blobRecOpts); + [name, score] = recognizeFace(vec, vecs, names, scoreThreshold); + + % show detection and prediction + out = insertAnnotation(out, rect, sprintf('%s (%.2f)', name, score), ... + 'Color',name2clr(name, names), 'TextColor',[255 255 255], ... + 'Thickness',2, 'FontScale',0.9); + end + + % update plot + set(hImg, 'CData',out); + drawnow; + + % check if add-a-person button is pressed + flag = ishghandle(hFig) && getappdata(hFig, 'flag'); + if flag && ~isempty(rects) + setappdata(hFig, 'flag',false); + + % prompt for name + name = inputdlg('Enter person name:'); + name = strtrim(name{1}); + + % face representation as feature vector + rect = rects(1,:); + face = alignFace(cv.Rect.crop(frame, rect)); + vec = face2vec(face, netRec, blobRecOpts); + + % store + vecs(:,end+1) = vec; + names{end+1} = name; + + % visualize face + name + face = cv.resize(face, [96 96]); + face = cv.putText(face, name, [5 25], ... + 'Color',name2clr(name, names), 'FontScale',0.6, 'Thickness',2); + faces(:,:,:,end+1) = face; + + % show montage of tracked people + if ishghandle(hThumb) + clf(hThumb, 'reset'); + figure(hThumb) + else + hThumb = figure; + end + montage(faces, 'Size',[NaN 2]); + movegui(hThumb, 'east') + end +end +cap.release(); + +%% +% Helper functions + +function [rects, confs] = detectFaces(img, net, blobOpts, thresh) + %DETECTFACES Run face detection network to detect faces on input image + % + % You may play with input blob sizes to balance detection quality and + % efficiency. The bigger input blob the smaller faces may be detected. + % + + % detect faces + net.setInput(cv.Net.blobFromImages(flip(img,3), blobOpts{:})); + dets = net.forward(); + + % SSD output is 1-by-1-by-ndetections-by-7 + % d = [img_id, class_id, confidence, left, bottom, right, top] + dets = permute(dets, [3 4 2 1]); + + % filter out weak detections + if nargin < 4, thresh = 0.5; end + idx = (dets(:,2) == 1 & dets(:,3) > thresh); % 0: background, 1: face + dets = dets(idx,:); + + % adjust relative coordinates to image size + sz = [size(img,2) size(img,1)]; + dets(:,4:7) = bsxfun(@times, dets(:,4:7), [sz sz]); + + % output detections (clamp coords and remove small and out-of-bound rects) + rects = cv.Rect.from2points(dets(:,4:5), dets(:,6:7)); + rects = cv.Rect.intersect(rects, [0 0 sz]); + idx = (cv.Rect.area(rects) >= 10); + rects = rects(idx,:); + confs = dets(idx,3); +end + +function img = alignFace(img) + %ALIGNFACE Align face to make the eyes and bottom lip appear in the same location + % + % OpenFace expects faces to be aligned, it uses Dlib. 
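+    % A minimal sketch of such an alignment (eyeL/eyeR are hypothetical eye
+    % centers obtained from some landmark detector) would rotate the crop so
+    % that the eye line becomes horizontal:
+    %
+    %     ang = atan2(eyeR(2)-eyeL(2), eyeR(1)-eyeL(1));    % eye-line angle
+    %     c = (eyeL + eyeR) / 2;                            % rotate about eyes midpoint
+    %     R = [cos(-ang) -sin(-ang); sin(-ang) cos(-ang)];
+    %     M = [R, c(:) - R*c(:)];                           % 2x3 affine matrix
+    %     img = cv.warpAffine(img, M, 'DSize',[size(img,2) size(img,1)]);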
+ % + + %TODO: not implemented, maybe we could port this: + % https://www.pyimagesearch.com/2017/05/22/face-alignment-with-opencv-and-python/ + + %TODO: we could also use facial landmarks from opencv_contrib face module + % (cv.Facemark and cv.FacemarkKazemi) +end + +function vec = face2vec(img, net, blobOpts) + %FACE2VEC Get 128 floating points feature vector + % + % Run face recognition network to receive 128-dimensional unit feature + % vector from input face image. + % + + net.setInput(cv.Net.blobFromImages(img, blobOpts{:})); + vec = net.forward(); + vec = vec(:); +end + +function [name, score] = recognizeFace(vec, vecs, names, thresh) + %RECOGNIZEFACE Perform face recognition + % + % Match a new feature vector with registered ones. Return a name of the + % best matched person. + % + % (NOTE: For more advanced usage, we could train an SVM classifier) + % + % See also: pdist2 + % + + if nargin < 4, thresh = 0.5; end + name = 'unknown'; + score = -1; + + if ~isempty(vecs) + scores = vec.' * vecs; % dot-product of vec against each vecs(:,i) + [s, idx] = max(scores); + if s > thresh + name = names{idx}; + score = s; + end + end +end + +function clr = name2clr(name, names) + clrs = round(255 * lines(7)); + idx = find(strcmp(name, names)); + if isempty(idx) + clr = [128 128 128]; + else + idx = rem(idx - 1, 7) + 1; + clr = clrs(idx,:); + end +end + +function img = insertAnnotation(img, rect, str, varargin) + % See also: insertObjectAnnotation, insertShape, insertText + p = inputParser(); + p.addParameter('Alpha', 0.6); + p.addParameter('Thickness', 1); + p.addParameter('Color', [255 255 0]); + p.addParameter('TextColor', [0 0 0]); + p.addParameter('FontFace', 'HersheySimplex'); + p.addParameter('FontScale', 0.4); + p.addParameter('AntiAlias', true); + p.addParameter('Shape', 'rectangle'); + p.parse(varargin{:}); + opts = p.Results; + opts.Shape = validatestring(opts.Shape, {'rectangle','circle'}); + thick = 1; + + [sz,b] = cv.getTextSize(str, 'Thickness',thick, ... + 'FontFace',opts.FontFace, 'FontScale',opts.FontScale); + txt_rect = [rect(1), rect(2)-sz(2)-b, sz(1), sz(2)+b]; + txt_orig = [rect(1), rect(2)-b]; + + if opts.AntiAlias + alias = {'LineType','AA'}; + else + alias = {'LineType',8}; + end + + overlay = img; + if strcmp(opts.Shape, 'rectangle') + overlay = cv.rectangle(overlay, rect, ... + 'Color',opts.Color, 'Thickness',opts.Thickness, alias{:}); + else + c = rect(1:2) + rect(3:4)/2; + r = max(rect(3:4)/2); + overlay = cv.circle(overlay, c, r, ... + 'Color',opts.Color, 'Thickness',opts.Thickness, alias{:}); + end + overlay = cv.rectangle(overlay, txt_rect, ... + 'Color',opts.Color, 'Thickness','Filled', alias{:}); + if opts.Thickness > 1 + overlay = cv.rectangle(overlay, txt_rect, ... + 'Color',opts.Color, 'Thickness',opts.Thickness, alias{:}); + end + overlay = cv.putText(overlay, str, txt_orig, ... + 'FontFace',opts.FontFace, 'FontScale',opts.FontScale, ... 
+ 'Color',opts.TextColor, 'Thickness',thick, alias{:}); + + img = cv.addWeighted(img,1-opts.Alpha, overlay,opts.Alpha, 0); +end + +%% +% Pretrained models + +function dname = get_dnn_dir(dname) + %GET_DNN_DIR Path to model files, and show where to get them if missing + + dname = fullfile(mexopencv.root(), 'test', 'dnn', dname); + b = isdir(dname); + if ~b + % display help of calling function + % (assumed to be a local function in current file) + st = dbstack(1); + help([mfilename() filemarker() st(1).name]) + end + assert(b, 'Missing model: %s', dname); +end + +function [net, blobOpts] = ResNetSSD_FaceDetector() + %RESNETSSD_FACEDETECTOR face detector based on SSD framework with reduced ResNet-10 backbone + % + % homepage = https://github.com/opencv/opencv/blob/3.4.0/samples/dnn/face_detector/how_to_train_face_detector.txt + % + % ## Model + % + % file = test/dnn/ResNetSSD_FaceDetector/deploy.prototxt + % url = https://github.com/opencv/opencv/raw/3.4.0/samples/dnn/face_detector/deploy.prototxt + % hash = 006BAF926232DF6F6332DEFB9C24F94BB9F3764E + % + % ## Weights + % + % file = test/dnn/ResNetSSD_FaceDetector/res10_300x300_ssd_iter_140000.caffemodel + % url = https://github.com/opencv/opencv_3rdparty/raw/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel + % hash = 15aa726b4d46d9f023526d85537db81cbc8dd566 + % size = 10.1 MB + % + + dname = get_dnn_dir('ResNetSSD_FaceDetector'); + net = cv.Net('Caffe', ... + fullfile(dname, 'deploy.prototxt'), ... + fullfile(dname, 'res10_300x300_ssd_iter_140000.caffemodel')); + blobOpts = {'SwapRB',false, 'Crop',false, 'Size',[300 300], 'Mean',[104 117 123]}; +end + +function [net, blobOpts] = OpenFace_Embedding() + %OPENFACE OpenFace embedding for face recognition + % + % homepage = https://cmusatyalab.github.io/openface/ + % + % ## Model + Weights + % + % file = test/dnn/OpenFace/nn4.small2.v1.t7 + % url = https://storage.cmusatyalab.org/openface-models/nn4.small2.v1.t7 + % hash = ac8161a4376fb5a79ceec55d85bbb57ef81da9fe + % size = 30 MB + % + + dname = get_dnn_dir('OpenFace'); + net = cv.Net('Torch', fullfile(dname, 'nn4.small2.v1.t7')); + blobOpts = {'SwapRB',false, 'Crop',false, 'Size',[96 96], 'ScaleFactor',1/255}; +end From a5e5c4b01f3483a790116cf3d9d6d3c43e112f65 Mon Sep 17 00:00:00 2001 From: Amro Date: Sun, 4 Feb 2018 18:40:49 +0200 Subject: [PATCH 34/36] dnn: new style transfer sample --- samples/dnn_style_transfer.m | 212 +++++++++++++++++++++++++++++++++++ 1 file changed, 212 insertions(+) create mode 100644 samples/dnn_style_transfer.m diff --git a/samples/dnn_style_transfer.m b/samples/dnn_style_transfer.m new file mode 100644 index 000000000..715c243f5 --- /dev/null +++ b/samples/dnn_style_transfer.m @@ -0,0 +1,212 @@ +%% DNN: Style Transfer +% Maps the artistic style of various pieces of artwork onto input image. +% +% This demo is used to run +% +% using OpenCV. It combines the content of one image with the style of another +% using convolutional neural networks. +% +% Pretrained models are available representing several styles: +% +% * The Muse, Pablo Picasso, 1935 +% +% * The Scream, Edvard Munch, 1893 +% +% * Udnie (Young American Girl, The Dance), Francis Picabia, 1913 +% +% * The Great Wave off Kanagawa, Hokusai, 1829-1832 +% +% * The Starry Night, Vincent Van Gogh, 1889 +% +% * Composition VII, Wassily Kandinsky, 1913 +% +% * Candy +% +% * Mosaic +% +% * Feathers +% +% +% References: +% +% * "Perceptual Losses for Real-Time Style Transfer and Super-Resolution". 
+% Justin Johnson, Alexandre Alahi, and Li Fei-Fei. ECCV 2016. +% +% +% * "Instance Normalization: The Missing Ingredient for Fast Stylization". +% Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. +% +% +% Sources: +% +% * +% + +%% +% input image +fname = fullfile(mexopencv.root(), 'test', 'lena.jpg'); +img = cv.imread(fname, 'Color',true, 'FlipChannels',false); + +%% +% load network +[net, blobOpts, ref] = StyleTransfer('the_scream'); +assert(~net.empty()); + +%% +% feed image to network +opts = parseBlobOpts(blobOpts{:}); +blob = cv.Net.blobFromImages(img, blobOpts{:}); +net.setInput(blob); + +%% +% run forward pass +tic +out = net.forward(); +toc + +%% +% recover output image +out = imageFromBlob(out, opts); +out = flip(out, 3); % BGR to RGB +if false + % use a median filter as a post-processing step + out = cv.medianBlur(out, 'KSize',3); +end + +%% +% show results +figure('Position',get(0, 'DefaultFigurePosition').*[0.5 1 2 1]) +subplot(131), imshow(flip(img, 3)), title('input') +subplot(132), imshow(out), title('output') +subplot(133), imshow(imread(ref)), title('reference') + +%% +% Helper functions + +function opts = parseBlobOpts(varargin) + p = inputParser(); + p.addParameter('ScaleFactor', 1.0); + p.addParameter('Size', [0 0]); % [w,h] + p.addParameter('Mean', [0 0 0]); % [r,g,b] + p.addParameter('SwapRB', true); + p.addParameter('Crop', true); + p.parse(varargin{:}); + opts = p.Results; +end + +function img = imageFromBlob(blob, opts) + img = permute(blob, [3 4 2 1]); % NCHW -> HWCN + img = img / opts.ScaleFactor; + if false && opts.SwapRB + opts.Mean([1 3]) = opts.Mean([3 1]); + end + img = bsxfun(@plus, img, reshape(opts.Mean, 1, 1, [])); + img = uint8(round(img)); +end + +%% +% Pretrained models + +function dname = get_dnn_dir(dname) + %GET_DNN_DIR Path to model files, and show where to get them if missing + + dname = fullfile(mexopencv.root(), 'test', 'dnn', dname); + b = isdir(dname); + if ~b + % display help of calling function + % (assumed to be a local function in current file) + st = dbstack(1); + help([mfilename() filemarker() st(1).name]) + end + assert(b, 'Missing model: %s', dname); +end + +function [net, blobOpts, fname] = StyleTransfer(style) + %STYLETRANSFER Style Transfer models [Torch] + % + % homepage = https://github.com/jcjohnson/fast-neural-style + % + % # Models from the ECCV 2016 paper + % + % ## Model + Weights + % + % file = test/dnn/StyleTransfer/eccv16/the_wave.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/eccv16/the_wave.t7 + % hash = ae2235b7d380c346cd009418efa012453e35d089 + % size = 24.3 MB + % + % file = test/dnn/StyleTransfer/eccv16/starry_night.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/eccv16/starry_night.t7 + % hash = 5b5e115253197b84d6c6ece1dafe6c15d7105ca6 + % size = 24.3 MB + % + % file = test/dnn/StyleTransfer/eccv16/la_muse.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/eccv16/la_muse.t7 + % hash = 66a0b11a82d2b635105771ca7e7fb5b87692a51b + % size = 24.3 MB + % + % file = test/dnn/StyleTransfer/eccv16/composition_vii.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/eccv16/composition_vii.t7 + % hash = c3bc362a742d833c2691fb02fd7904bd73ed6632 + % size = 27.0 MB + % + % # Models with instance normalization + % (smaller and faster models without sacrificing quality) + % + % ## Model + Weights + % + % file = test/dnn/StyleTransfer/instance_norm/candy.t7 + % url = 
https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/instance_norm/candy.t7 + % hash = 64cf17abf37f4b3ac6773d8524dd3ba47a4ff5c2 + % size = 14.8 MB + % + % file = test/dnn/StyleTransfer/instance_norm/la_muse.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/instance_norm/la_muse.t7 + % hash = cab9697c54cbc652bd5069dc734a0200d2b88f03 + % size = 14.8 MB + % + % file = test/dnn/StyleTransfer/instance_norm/mosaic.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/instance_norm/mosaic.t7 + % hash = f4d3e2a5e3060b3c39a9648ad009de3e09cd0001 + % size = 17.5 MB + % + % file = test/dnn/StyleTransfer/instance_norm/feathers.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/instance_norm/feathers.t7 + % hash = 9838007df750d483b5b5e90b92d76e8ada5a31c0 + % size = 17.5 MB + % + % file = test/dnn/StyleTransfer/instance_norm/the_scream.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/instance_norm/the_scream.t7 + % hash = ae36b9289cab525657b97a4ea63609c338137da7 + % size = 17.5 MB + % + % file = test/dnn/StyleTransfer/instance_norm/udnie.t7 + % url = https://cs.stanford.edu/people/jcjohns/fast-neural-style/models/instance_norm/udnie.t7 + % hash = db237afe0c2a08c93bb77f63c40d0fea8191c631 + % size = 10.7 MB + % + + if false + dname = get_dnn_dir(fullfile('StyleTransfer', 'eccv16')); + style = validatestring(style, ... + {'the_wave', 'starry_night', 'la_muse', 'composition_vii'}); + else + dname = get_dnn_dir(fullfile('StyleTransfer', 'instance_norm')); + style = validatestring(style, ... + {'candy', 'la_muse', 'mosaic', 'feathers', 'the_scream', 'udnie'}); + end + + % load model + net = cv.Net('Torch', fullfile(dname, [style '.t7'])); + blobOpts = {'SwapRB',false, 'Mean',[103.939, 116.779, 123.68]}; + + % reference style image + im = [style '.jpg']; + if strcmp(style, 'the_wave'), im = 'wave.jpg'; end + fname = fullfile(dname, im); + if exist(fname, 'file') ~= 2 + disp('Downloading style image...') + url = 'https://github.com/jcjohnson/fast-neural-style/raw/master/images/styles/'; + urlwrite([url im], fname); + end +end From d46bdbfbc4b3d3c0f3688ad0ddbeda6601a04418 Mon Sep 17 00:00:00 2001 From: Amro Date: Thu, 22 Feb 2018 13:29:34 +0200 Subject: [PATCH 35/36] update Doxyfile ran "doxygen -u" --- Doxyfile | 78 +++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 29 deletions(-) diff --git a/Doxyfile b/Doxyfile index 564600348..698d26f22 100644 --- a/Doxyfile +++ b/Doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.13 +# Doxyfile 1.8.14 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -20,8 +20,8 @@ # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all text # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. +# built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 @@ -226,7 +226,8 @@ TAB_SIZE = 4 # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. 
+# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. ALIASES = @@ -327,7 +328,7 @@ BUILTIN_STL_SUPPORT = YES CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. @@ -698,7 +699,7 @@ LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. @@ -790,7 +791,7 @@ INPUT = README.markdown \ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# documentation (see: https://www.gnu.org/software/libiconv/) for the list of # possible encodings. # The default value is: UTF-8. @@ -1008,7 +1009,7 @@ SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version +# (see https://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: @@ -1054,6 +1055,17 @@ CLANG_ASSISTED_PARSING = NO CLANG_OPTIONS = +# If clang assisted parsing is enabled you can provide the clang parser with the +# path to the compilation database (see: +# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) used when the files +# were built. This is equivalent to specifying the "-p" option to a clang tool, +# such as clang-check. These options will then be passed to the parser. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse-libclang=ON option for CMake. +# The default value is: 0. + +CLANG_COMPILATION_DATABASE_PATH = 0 + #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- @@ -1172,7 +1184,7 @@ HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# https://en.wikipedia.org/wiki/Hue for more information. 
For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. @@ -1208,6 +1220,17 @@ HTML_COLORSTYLE_GAMMA = 80 HTML_TIMESTAMP = YES +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via Javascript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have Javascript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. @@ -1231,12 +1254,12 @@ HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# environment (see: https://developer.apple.com/tools/xcode/), introduced with # OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# startup. See https://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1352,7 +1375,7 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# (see: http://doc.qt.io/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1360,8 +1383,7 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). +# Folders (see: http://doc.qt.io/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1369,23 +1391,21 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). 
+# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# http://doc.qt.io/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = @@ -1478,7 +1498,7 @@ EXT_LINKS_IN_WINDOW = NO FORMULA_FONTSIZE = 10 -# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # @@ -1490,7 +1510,7 @@ FORMULA_FONTSIZE = 10 FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering +# https://www.mathjax.org) which uses client side Javascript for the rendering # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path @@ -1517,8 +1537,8 @@ MATHJAX_FORMAT = HTML-CSS # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. +# MathJax from https://www.mathjax.org before deployment. +# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://www.mathjax.org/mathjax @@ -1579,7 +1599,7 @@ SERVER_BASED_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). +# Xapian (see: https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. @@ -1592,7 +1612,7 @@ EXTERNAL_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). See the section "External Indexing and +# Xapian (see: https://xapian.org/). See the section "External Indexing and # Searching" for details. # This tag requires that the tag SEARCHENGINE is set to YES. @@ -1779,7 +1799,7 @@ LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See -# http://en.wikipedia.org/wiki/BibTeX and \cite for more info. +# https://en.wikipedia.org/wiki/BibTeX and \cite for more info. # The default value is: plain. # This tag requires that the tag GENERATE_LATEX is set to YES. 
@@ -1962,9 +1982,9 @@ DOCBOOK_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an -# AutoGen Definitions (see http://autogen.sf.net) file that captures the -# structure of the code including all documentation. Note that this feature is -# still experimental and incomplete at the moment. +# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures +# the structure of the code including all documentation. Note that this feature +# is still experimental and incomplete at the moment. # The default value is: NO. GENERATE_AUTOGEN_DEF = NO From c370b779d29fdcd2e20748eeca7d9eb984693894 Mon Sep 17 00:00:00 2001 From: Amro Date: Thu, 22 Feb 2018 13:32:02 +0200 Subject: [PATCH 36/36] bump version to 3.4.0 and update matlab docs integration --- .travis.yml | 16 ++++---- Contents.m | 26 +++++++++++-- Doxyfile | 2 +- README.markdown | 56 +++++++++++++++++----------- appveyor.yml | 8 ++-- doc/helptoc.xml | 20 +++++++++- opencv_contrib/samples/demos.xml | 64 +++++++++++++++++++++++++++++++- samples/demos.xml | 15 ++++++++ 8 files changed, 168 insertions(+), 39 deletions(-) diff --git a/.travis.yml b/.travis.yml index ab58cb319..b17e71723 100644 --- a/.travis.yml +++ b/.travis.yml @@ -74,12 +74,12 @@ install: #- sudo apt-get install -y libgdcm2-dev libgdal-dev #- sudo apt-get install -y libgtk-3-dev libvtk6-dev libopenblas-dev - # build OpenCV 3.3.1 from source (opencv + opencv_contrib) + # build OpenCV 3.4.0 from source (opencv + opencv_contrib) - mkdir $HOME/cv && pushd $HOME/cv - - wget -O opencv-3.3.1.zip https://github.com/opencv/opencv/archive/3.3.1.zip - - wget -O opencv_contrib-3.3.1.zip https://github.com/opencv/opencv_contrib/archive/3.3.1.zip - - unzip opencv-3.3.1.zip > /dev/null - - unzip opencv_contrib-3.3.1.zip > /dev/null + - wget -O opencv-3.4.0.zip https://github.com/opencv/opencv/archive/3.4.0.zip + - wget -O opencv_contrib-3.4.0.zip https://github.com/opencv/opencv_contrib/archive/3.4.0.zip + - unzip opencv-3.4.0.zip > /dev/null + - unzip opencv_contrib-3.4.0.zip > /dev/null - mkdir build && cd build - cmake -G "$CMAKE_GEN" -Wno-dev -DBUILD_DOCS:BOOL=OFF @@ -89,8 +89,10 @@ install: -DBUILD_TESTS:BOOL=OFF -DBUILD_WITH_DEBUG_INFO:BOOL=OFF -DBUILD_ITT:BOOL=OFF + -DBUILD_JAVA:BOOL=OFF -DCV_TRACE:BOOL=OFF -DENABLE_PYLINT:BOOL=OFF + -DENABLE_CXX11:BOOL=ON -DWITH_CUDA:BOOL=OFF -DWITH_CUBLAS:BOOL=OFF -DWITH_CUFFT:BOOL=OFF @@ -116,10 +118,10 @@ install: -DBUILD_opencv_js:BOOL=OFF -DBUILD_opencv_python2:BOOL=OFF -DBUILD_opencv_python3:BOOL=OFF + -DBUILD_opencv_python_bindings_generator:BOOL=OFF -DBUILD_opencv_ts:BOOL=OFF -DBUILD_opencv_viz:BOOL=OFF -DBUILD_opencv_world:BOOL=OFF - -DBUILD_opencv_contrib_world:BOOL=OFF -DBUILD_opencv_matlab:BOOL=OFF -DBUILD_opencv_ccalib:BOOL=OFF -DBUILD_opencv_cvv:BOOL=OFF @@ -130,7 +132,7 @@ install: -DCMAKE_BUILD_TYPE:STRING=Release -DCMAKE_INSTALL_PREFIX:PATH=$INSTALL_PREFIX -DOPENCV_ENABLE_NONFREE:BOOL=ON - -DOPENCV_EXTRA_MODULES_PATH:PATH=$HOME/cv/opencv_contrib-3.3.1/modules $HOME/cv/opencv-3.3.1 + -DOPENCV_EXTRA_MODULES_PATH:PATH=$HOME/cv/opencv_contrib-3.4.0/modules $HOME/cv/opencv-3.4.0 - cmake --build . - sudo cmake --build . 
--target install - popd diff --git a/Contents.m b/Contents.m index f6d3d8776..ec7bc357c 100644 --- a/Contents.m +++ b/Contents.m @@ -1,5 +1,5 @@ % mexopencv -% Version 3.3.1 (R2017a) 26-November-2017 +% Version 3.4.0 (R2017a) 22-February-2018 % %% opencv: Main Modules % @@ -111,8 +111,8 @@ % cv.logPolar - Remaps an image to semilog-polar coordinates space % cv.linearPolar - Remaps an image to polar coordinates space % cv.integral - Calculates the integral of an image -% cv.accumulate - Adds an image to the accumulator -% cv.accumulateSquare - Adds the square of a source image to the accumulator +% cv.accumulate - Adds an image to the accumulator image +% cv.accumulateSquare - Adds the square of a source image to the accumulator image % cv.accumulateProduct - Adds the per-element product of two input images to the accumulator % cv.accumulateWeighted - Updates a running average % cv.phaseCorrelate - Detect translational shifts that occur between two images @@ -248,6 +248,15 @@ % cv.decomposeHomographyMat - Decompose a homography matrix to rotation(s), translation(s) and plane normal(s) % cv.StereoBM - Class for computing stereo correspondence using the block matching algorithm % cv.StereoSGBM - Class for computing stereo correspondence using the semi-global block matching algorithm +% cv.fisheyeProjectPoints - Projects points using fisheye model +% cv.fisheyeDistortPoints - Distorts 2D points using fisheye model +% cv.fisheyeUndistortPoints - Undistorts 2D points using fisheye model +% cv.fisheyeInitUndistortRectifyMap - Computes undistortion and rectification maps (fisheye) +% cv.fisheyeUndistortImage - Transforms an image to compensate for fisheye lens distortion +% cv.fisheyeEstimateNewCameraMatrixForUndistortRectify - Estimates new camera matrix for undistortion or rectification (fisheye) +% cv.fisheyeCalibrate - Performs camera calibaration (fisheye) +% cv.fisheyeStereoRectify - Stereo rectification for fisheye camera model +% cv.fisheyeStereoCalibrate - Performs stereo calibration (fisheye) % % features2d: 2D Features Framework % cv.KeyPointsFilter - Methods to filter a vector of keypoints @@ -381,6 +390,9 @@ % cv.BackgroundSubtractorMOG - Gaussian Mixture-based Background/Foreground Segmentation Algorithm % cv.BackgroundSubtractorGMG - Background Subtractor module % cv.BackgroundSubtractorCNT - Background subtraction based on counting +% cv.BackgroundSubtractorGSOC - Background Subtraction implemented during GSOC +% cv.BackgroundSubtractorLSBP - Background Subtraction using Local SVD Binary Pattern +% cv.SyntheticSequenceGenerator - Synthetic frame sequence generator for testing background subtraction algorithms % % bioinspired: Biologically Inspired Vision Models and Derivated Tools % cv.Retina - A biological retina model for image spatio-temporal noise and luminance changes enhancement @@ -393,10 +405,12 @@ % dpm: Deformable Part-based Models % cv.DPMDetector - Deformable Part-based Models (DPM) detector % -% face: Face Recognition +% face: Face Analysis % cv.BasicFaceRecognizer - Face Recognition based on Eigen-/Fisher-faces % cv.LBPHFaceRecognizer - Face Recognition based on Local Binary Patterns % cv.BIF - Implementation of bio-inspired features (BIF) +% cv.Facemark - Base class for all facemark models +% cv.FacemarkKazemi - Face Alignment % % img_hash: Image Hashing Algorithms % cv.ImgHash - Base class for Image Hashing algorithms @@ -462,6 +476,7 @@ % cv.DisparityWLSFilter - Disparity map filter based on Weighted Least Squares filter % cv.EdgeAwareInterpolator - Sparse match 
interpolation algorithm % cv.StructuredEdgeDetection - Class implementing edge detection algorithm +% cv.EdgeBoxes - Class implementing Edge Boxes algorithm % cv.SuperpixelSEEDS - Class implementing the SEEDS (Superpixels Extracted via Energy-Driven Sampling) superpixels algorithm % cv.SuperpixelSLIC - Class implementing the SLIC (Simple Linear Iterative Clustering) superpixels algorithm % cv.SuperpixelLSC - Class implementing the LSC (Linear Spectral Clustering) superpixels algorithm @@ -475,6 +490,9 @@ % cv.GradientPaillou - Applies Paillou filter to an image % cv.GradientDeriche - Applies Deriche filter to an image % cv.PeiLinNormalization - Calculates an affine transformation that normalize given image using Pei/Lin Normalization +% cv.ContourFitting - Contour Fitting algorithm using Fourier descriptors +% cv.RidgeDetectionFilter - Ridge Detection Filter +% cv.BrightEdges - Bright edges detector % cv.niBlackThreshold - Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired % cv.thinning - Applies a binary blob thinning operation, to achieve a skeletization of the input image % cv.anisotropicDiffusion - Performs anisotropic diffusion on an image diff --git a/Doxyfile b/Doxyfile index 698d26f22..8974f00c4 100644 --- a/Doxyfile +++ b/Doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = mexopencv # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 3.3.1 +PROJECT_NUMBER = 3.4.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/README.markdown b/README.markdown index 2cad0e734..f6b131a3b 100644 --- a/README.markdown +++ b/README.markdown @@ -12,9 +12,21 @@ MATLAB's native data type and OpenCV data types. The package is suitable for fast prototyping of OpenCV application in MATLAB, use of OpenCV as an external toolbox in MATLAB, and development of custom MEX functions. -The current version of mexopencv is compatible with OpenCV 3.3. -For older OpenCV versions, please checkout the corresponding branches -([v3.2][21], [v3.1][20], [v3.0][19], [v2.4][18], [v2.3][17], and [v2.1][16]). +The current version of mexopencv is compatible with OpenCV 3.4.0. + +For previous OpenCV 3.x versions, checkout the corresponding tags: + +- [v3.3.1][23] +- [v3.3.0][22] +- [v3.2.0][21] +- [v3.1.0][20] +- [v3.0.0][19] + +For OpenCV 2.x, checkout these older branches: + +- [v2.4][18] (last tested with OpenCV v2.4.11) +- [v2.3][17] +- [v2.1][16] Consult the [wiki][3] for help. @@ -58,7 +70,7 @@ Build Prerequisite - [MATLAB][4] or [Octave][5] (>= 4.0.0) -- [OpenCV][6] (3.3.1) +- [OpenCV][6] (3.4.0) Depending on your platform, you also need the required build tools: @@ -74,7 +86,7 @@ Refer to the [wiki][3] for detailed build instructions. OpenCV ------ -Currently, mexopencv targets the final **3.3.1** stable version of OpenCV. You +Currently, mexopencv targets the final **3.4.0** stable version of OpenCV. You must build it against this exact version, rather than using the bleeding-edge dev-version of `opencv` and `opencv_contrib`. UNIX users should consider using a package manager to install OpenCV if available. @@ -83,19 +95,18 @@ a package manager to install OpenCV if available. 
- [OpenCV contributed modules][8] **DO NOT use the "master" branch of `opencv` and `opencv_contrib`!** -**Only the 3.3.1 release is supported by mexopencv.** +**Only the 3.4.0 release is supported by mexopencv.** Linux ----- -First make sure you have OpenCV 3.3.1 installed in the system: +First make sure you have OpenCV 3.4.0 installed in the system: - if applicable, install OpenCV 3 package available in your package manager (e.g., `libopencv-dev` in Debian/Ubuntu, `opencv-devel` in Fedora). - Sadly the [OpenCV package][9] provided in Ubuntu 17.04 is still behind - with OpenCV 2.4, which can only be used with the [v2.4 release][18] of - mexopencv. -- otherwise, you need to build and install OpenCV from [source][7]: + Note that these packages are not always up-to-date, so you might need to use + older mexopencv versions to match their [opencv package][9] version. +- otherwise, you can always build and install OpenCV from [source][7]: $ cd $ cmake @@ -104,7 +115,7 @@ First make sure you have OpenCV 3.3.1 installed in the system: At this point, you should make sure that the [`pkg-config`][10] command can identify and locate OpenCV libraries (if needed, set the `PKG_CONFIG_PATH` -environment variable to help it find `opencv.pc`): +environment variable to help it find the `opencv.pc` file): $ pkg-config --cflags --libs opencv @@ -117,7 +128,7 @@ directory if you installed MATLAB to a non-default location: $ make MATLABDIR=/opt/local/MATLAB/R2017a -You can also work with [Octave][5] by specifying: +You can also work with [Octave][5] instead of MATLAB by specifying: $ make WITH_OCTAVE=true @@ -127,7 +138,7 @@ mexopencv as: $ make all contrib -Optionally you can test mexopencv functionality: +Finally you can test mexopencv functionality: $ make test @@ -152,7 +163,8 @@ OpenCV 3: Otherwise, you can build OpenCV from [source][7], similar to the Linux case. -If you have all the prerequisites, go to the mexopencv directory and type: +If you have all the prerequisites, go to the mexopencv directory and run +(modifying the options as needed): $ make MATLABDIR=/Applications/MATLAB_R2016a.app PKG_CONFIG_MATLAB=opencv3 LDFLAGS=-L/usr/local/share/OpenCV/3rdparty/lib -j2 @@ -256,9 +268,9 @@ The code may be redistributed under the [BSD 3-Clause license](LICENSE). [4]: https://www.mathworks.com/products/matlab.html [5]: https://www.gnu.org/software/octave/ [6]: https://opencv.org/ -[7]: https://github.com/opencv/opencv/releases/tag/3.3.1 -[8]: https://github.com/opencv/opencv_contrib/releases/tag/3.3.1 -[9]: https://packages.ubuntu.com/zesty/libopencv-dev +[7]: https://github.com/opencv/opencv/releases/tag/3.4.0 +[8]: https://github.com/opencv/opencv_contrib/releases/tag/3.4.0 +[9]: https://packages.ubuntu.com/artful/libopencv-dev [10]: https://people.freedesktop.org/~dbn/pkg-config-guide.html [11]: https://brew.sh/ [12]: http://kyamagu.github.io/mexopencv/matlab @@ -268,6 +280,8 @@ The code may be redistributed under the [BSD 3-Clause license](LICENSE). 
[16]: https://github.com/kyamagu/mexopencv/tree/v2.1 [17]: https://github.com/kyamagu/mexopencv/tree/v2.3 [18]: https://github.com/kyamagu/mexopencv/tree/v2.4 -[19]: https://github.com/kyamagu/mexopencv/tree/v3.0 -[20]: https://github.com/kyamagu/mexopencv/tree/v3.1 -[21]: https://github.com/kyamagu/mexopencv/tree/v3.2 +[19]: https://github.com/kyamagu/mexopencv/tree/v3.0.0 +[20]: https://github.com/kyamagu/mexopencv/tree/v3.1.0.1 +[21]: https://github.com/kyamagu/mexopencv/tree/v3.2.0 +[22]: https://github.com/kyamagu/mexopencv/tree/v3.3.0 +[23]: https://github.com/kyamagu/mexopencv/tree/v3.3.1 diff --git a/appveyor.yml b/appveyor.yml index 5465a2c20..d630bd928 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -4,11 +4,11 @@ # # Note: # We use prepared opencv binaries built using the same MinGW/Octave config, -# see: https://github.com/amroamroamro/opencv/blob/tag_3.3.1/appveyor.yml +# see: https://github.com/amroamroamro/opencv/blob/tag_3.4.0/appveyor.yml # # version format -version: 3.3.1.{build} +version: 3.4.0.{build} # clone directory clone_folder: c:\dev\mexopencv @@ -57,9 +57,9 @@ install: - cd "c:\dev" - if "%WITH_DOXY%" == "yes" ( choco install doxygen.portable -y -r ) - ps: Start-FileDownload "https://github.com/amroamroamro/opencv/releases/download/3.2.0/mingw32-make.exe" - - ps: Start-FileDownload "https://github.com/amroamroamro/opencv/releases/download/3.3.1/cv331_x86_mingw.7z" + - ps: Start-FileDownload "https://github.com/amroamroamro/opencv/releases/download/3.4.0/cv340_x86_mingw.7z" - ps: Start-FileDownload "https://ftp.gnu.org/gnu/octave/windows/octave-4.2.1-w32.zip" - - 7z x "c:\dev\cv331_x86_mingw.7z" -o"c:\dev\build" -y > nul + - 7z x "c:\dev\cv340_x86_mingw.7z" -o"c:\dev\build" -y > nul - 7z x "c:\dev\octave-4.2.1-w32.zip" -o"c:\dev" -y > nul - copy /y "%OCTAVE_HOME%\bin\libopenblas.dll" "%OCTAVE_HOME%\bin\libblas.dll" > nul - set "PATH=%OCTAVE_HOME%\bin;%OPENCV_DIR%\x86\mingw\bin;c:\dev;%PATH%" diff --git a/doc/helptoc.xml b/doc/helptoc.xml index b97f62780..f5fa79bf8 100644 --- a/doc/helptoc.xml +++ b/doc/helptoc.xml @@ -269,6 +269,15 @@ cv.decomposeHomographyMat cv.StereoBM cv.StereoSGBM + cv.fisheyeProjectPoints + cv.fisheyeDistortPoints + cv.fisheyeUndistortPoints + cv.fisheyeInitUndistortRectifyMap + cv.fisheyeUndistortImage + cv.fisheyeEstimateNewCameraMatrixForUndistortRectify + cv.fisheyeCalibrate + cv.fisheyeStereoRectify + cv.fisheyeStereoCalibrate features2d: 2D Features Framework cv.KeyPointsFilter @@ -402,6 +411,9 @@ cv.BackgroundSubtractorMOG cv.BackgroundSubtractorGMG cv.BackgroundSubtractorCNT + cv.BackgroundSubtractorGSOC + cv.BackgroundSubtractorLSBP + cv.SyntheticSequenceGenerator bioinspired: Biologically Inspired Vision Models and Derivated Tools cv.Retina @@ -414,10 +426,12 @@ dpm: Deformable Part-based Models cv.DPMDetector - face: Face Recognition + face: Face Analysis cv.BasicFaceRecognizer cv.LBPHFaceRecognizer cv.BIF + cv.Facemark + cv.FacemarkKazemi img_hash: Image Hashing Algorithms cv.ImgHash @@ -483,6 +497,7 @@ cv.DisparityWLSFilter cv.EdgeAwareInterpolator cv.StructuredEdgeDetection + cv.EdgeBoxes cv.SuperpixelSEEDS cv.SuperpixelSLIC cv.SuperpixelLSC @@ -496,6 +511,9 @@ cv.GradientPaillou cv.GradientDeriche cv.PeiLinNormalization + cv.ContourFitting + cv.RidgeDetectionFilter + cv.BrightEdges cv.niBlackThreshold cv.thinning cv.anisotropicDiffusion diff --git a/opencv_contrib/samples/demos.xml b/opencv_contrib/samples/demos.xml index 0430b4f63..fb1eb20de 100644 --- a/opencv_contrib/samples/demos.xml +++ b/opencv_contrib/samples/demos.xml 
@@ -71,6 +71,11 @@ M-file BackgroundSubtractorDemo + + + M-file + bgsegm_synthetic_seq_demo + @@ -107,7 +112,7 @@ - + M-file @@ -117,6 +122,53 @@ Statistics and Machine Learning Toolbox + + + M-file + facemark_kazemi_detect_img_demo + + + + M-file + facemark_kazemi_detect_vid_demo + + + + M-file + facemark_kazemi_train_demo + + + + M-file + facemark_kazemi_train2_demo + + + + M-file + facemark_kazemi_train_config_demo + + + + M-file + facemark_aam_train_demo + + + + M-file + facemark_lbf_train_demo + + + + M-file + facemark_lbf_fitting_demo + + + + M-file + face_swapping_demo + + Image Processing Toolbox + @@ -269,6 +321,16 @@ M-file peilin_demo + + + M-GUI + brightedges_demo_gui + + + + M-GUI + fourier_descriptors_demo_gui + diff --git a/samples/demos.xml b/samples/demos.xml index 9b86a3909..9ef0f6489 100644 --- a/samples/demos.xml +++ b/samples/demos.xml @@ -78,6 +78,11 @@ M-file dft_demo + + + M-file + kaleidoscope_demo + @@ -800,6 +805,16 @@ M-file dnn_face_detector + + + M-file + dnn_face_recognition + + + + M-file + dnn_style_transfer +