Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Upgrade OpenCV library to 2.4.3.

Changed the C++ standard library from libstdc++ to libc++.
  • Loading branch information...
commit 18c39baaa9ac89b563be7ed23c40b823b2a70c08 1 parent b55e4ea
@jeradesign authored
Showing with 5,584 additions and 4,195 deletions.
  1. +10 −12 CVFunhouse.xcodeproj/project.pbxproj
  2. +1 −1  opencv2.framework/Versions/A/Headers/calib3d/calib3d.hpp
  3. +20 −16 opencv2.framework/Versions/A/Headers/contrib/contrib.hpp
  4. +6 −2 opencv2.framework/Versions/A/Headers/contrib/detection_based_tracker.hpp
  5. +405 −0 opencv2.framework/Versions/A/Headers/contrib/openfabmap.hpp
  6. +184 −184 opencv2.framework/Versions/A/Headers/contrib/retina.hpp
  7. +151 −28 opencv2.framework/Versions/A/Headers/core/core.hpp
  8. +185 −0 opencv2.framework/Versions/A/Headers/core/cuda_devptrs.hpp
  9. +43 −161 opencv2.framework/Versions/A/Headers/core/devmem2d.hpp
  10. +93 −5 opencv2.framework/Versions/A/Headers/core/eigen.hpp
  11. +577 −565 opencv2.framework/Versions/A/Headers/core/gpumat.hpp
  12. +59 −35 opencv2.framework/Versions/A/Headers/core/internal.hpp
  13. +1 −1  opencv2.framework/Versions/A/Headers/core/mat.hpp
  14. +335 −335 opencv2.framework/Versions/A/Headers/core/opengl_interop.hpp
  15. +102 −48 opencv2.framework/Versions/A/Headers/core/operations.hpp
  16. +1 −1  opencv2.framework/Versions/A/Headers/core/version.hpp
  17. +8 −8 opencv2.framework/Versions/A/Headers/core/wimage.hpp
  18. +112 −10 opencv2.framework/Versions/A/Headers/features2d/features2d.hpp
  19. +1 −0  opencv2.framework/Versions/A/Headers/flann/defines.h
  20. +41 −4 opencv2.framework/Versions/A/Headers/flann/dist.h
  21. +49 −49 opencv2.framework/Versions/A/Headers/flann/flann.hpp
  22. +2 −2 opencv2.framework/Versions/A/Headers/flann/hierarchical_clustering_index.h
  23. +12 −12 opencv2.framework/Versions/A/Headers/flann/kdtree_single_index.h
  24. +7 −5 opencv2.framework/Versions/A/Headers/flann/lsh_index.h
  25. +1 −1  opencv2.framework/Versions/A/Headers/flann/lsh_table.h
  26. +25 −19 opencv2.framework/Versions/A/Headers/flann/miniflann.hpp
  27. +163 −0 opencv2.framework/Versions/A/Headers/highgui/cap_ios.h
  28. +15 −13 opencv2.framework/Versions/A/Headers/highgui/highgui.hpp
  29. +30 −12 opencv2.framework/Versions/A/Headers/highgui/highgui_c.h
  30. +40 −1 opencv2.framework/Versions/A/Headers/imgproc/imgproc.hpp
  31. +1 −1  opencv2.framework/Versions/A/Headers/imgproc/imgproc_c.h
  32. +8 −8 opencv2.framework/Versions/A/Headers/legacy/blobtrack.hpp
  33. +11 −11 opencv2.framework/Versions/A/Headers/legacy/compat.hpp
  34. +22 −23 opencv2.framework/Versions/A/Headers/legacy/legacy.hpp
  35. +2 −2 opencv2.framework/Versions/A/Headers/legacy/streams.hpp
  36. +0 −16 opencv2.framework/Versions/A/Headers/ml/ml.hpp
  37. +20 −20 opencv2.framework/Versions/A/Headers/nonfree/features2d.hpp
  38. +1 −1  opencv2.framework/Versions/A/Headers/nonfree/nonfree.hpp
  39. +65 −37 opencv2.framework/Versions/A/Headers/objdetect/objdetect.hpp
  40. +1 −1  opencv2.framework/Versions/A/Headers/opencv_modules.hpp
  41. +18 −1 opencv2.framework/Versions/A/Headers/photo/photo.hpp
  42. +69 −69 opencv2.framework/Versions/A/Headers/photo/photo_c.h
  43. +65 −65 opencv2.framework/Versions/A/Headers/stitching/detail/autocalib.hpp
  44. +137 −137 opencv2.framework/Versions/A/Headers/stitching/detail/blenders.hpp
  45. +106 −106 opencv2.framework/Versions/A/Headers/stitching/detail/exposure_compensate.hpp
  46. +188 −188 opencv2.framework/Versions/A/Headers/stitching/detail/matchers.hpp
  47. +205 −205 opencv2.framework/Versions/A/Headers/stitching/detail/motion_estimators.hpp
  48. +259 −149 opencv2.framework/Versions/A/Headers/stitching/detail/seam_finders.hpp
  49. +162 −162 opencv2.framework/Versions/A/Headers/stitching/detail/util.hpp
  50. +127 −127 opencv2.framework/Versions/A/Headers/stitching/detail/util_inl.hpp
  51. +515 −510 opencv2.framework/Versions/A/Headers/stitching/detail/warpers.hpp
  52. +765 −764 opencv2.framework/Versions/A/Headers/stitching/detail/warpers_inl.hpp
  53. +13 −9 opencv2.framework/Versions/A/Headers/stitching/stitcher.hpp
  54. +20 −20 opencv2.framework/Versions/A/Headers/stitching/warpers.hpp
  55. +84 −17 opencv2.framework/Versions/A/Headers/video/background_segm.hpp
  56. +30 −5 opencv2.framework/Versions/A/Headers/video/tracking.hpp
  57. +11 −11 opencv2.framework/Versions/A/Resources/Info.plist
  58. BIN  opencv2.framework/Versions/A/opencv2
View
22 CVFunhouse.xcodeproj/project.pbxproj
@@ -12,11 +12,11 @@
4209534715C0BEB200C37CD8 /* CVFMotionTemplates.m in Sources */ = {isa = PBXBuildFile; fileRef = 4209534615C0BEB200C37CD8 /* CVFMotionTemplates.m */; };
420A3F8515BC6724004804EF /* CVFPassThru.m in Sources */ = {isa = PBXBuildFile; fileRef = 420A3F8415BC6724004804EF /* CVFPassThru.m */; };
42171D1215BA6C19008BEA68 /* opencv2.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 42171D1115BA6C19008BEA68 /* opencv2.framework */; };
- 42171D1415BA6C37008BEA68 /* libstdc++.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 42171D1315BA6C37008BEA68 /* libstdc++.dylib */; };
423FA83715DB1AD3001D30AC /* SwitchCameraIcon44.png in Resources */ = {isa = PBXBuildFile; fileRef = 423FA83515DB1AD3001D30AC /* SwitchCameraIcon44.png */; };
423FA83815DB1AD3001D30AC /* SwitchCameraIcon44@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = 423FA83615DB1AD3001D30AC /* SwitchCameraIcon44@2x.png */; };
423FA83B15DB264B001D30AC /* iButton30.png in Resources */ = {isa = PBXBuildFile; fileRef = 423FA83915DB264B001D30AC /* iButton30.png */; };
423FA83C15DB264B001D30AC /* iButton30@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = 423FA83A15DB264B001D30AC /* iButton30@2x.png */; };
+ 4249F7AB16697A0900271FD5 /* libc++.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 4249F7AA16697A0900271FD5 /* libc++.dylib */; };
4253708C1508383800BFE3C0 /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4253708B1508383800BFE3C0 /* AVFoundation.framework */; };
425370901508384900BFE3C0 /* CoreMedia.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4253708E1508384900BFE3C0 /* CoreMedia.framework */; };
425370911508384900BFE3C0 /* CoreVideo.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4253708F1508384900BFE3C0 /* CoreVideo.framework */; };
@@ -54,11 +54,11 @@
420A3F8315BC6724004804EF /* CVFPassThru.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = CVFPassThru.h; path = CVFunhouse/CVFPassThru.h; sourceTree = "<group>"; };
420A3F8415BC6724004804EF /* CVFPassThru.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = CVFPassThru.m; path = CVFunhouse/CVFPassThru.m; sourceTree = "<group>"; };
42171D1115BA6C19008BEA68 /* opencv2.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; path = opencv2.framework; sourceTree = "<group>"; };
- 42171D1315BA6C37008BEA68 /* libstdc++.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = "libstdc++.dylib"; path = "usr/lib/libstdc++.dylib"; sourceTree = SDKROOT; };
423FA83515DB1AD3001D30AC /* SwitchCameraIcon44.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = SwitchCameraIcon44.png; sourceTree = "<group>"; };
423FA83615DB1AD3001D30AC /* SwitchCameraIcon44@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "SwitchCameraIcon44@2x.png"; sourceTree = "<group>"; };
423FA83915DB264B001D30AC /* iButton30.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = iButton30.png; sourceTree = "<group>"; };
423FA83A15DB264B001D30AC /* iButton30@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "iButton30@2x.png"; sourceTree = "<group>"; };
+ 4249F7AA16697A0900271FD5 /* libc++.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = "libc++.dylib"; path = "usr/lib/libc++.dylib"; sourceTree = SDKROOT; };
4253708B1508383800BFE3C0 /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; };
4253708E1508384900BFE3C0 /* CoreMedia.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreMedia.framework; path = System/Library/Frameworks/CoreMedia.framework; sourceTree = SDKROOT; };
4253708F1508384900BFE3C0 /* CoreVideo.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreVideo.framework; path = System/Library/Frameworks/CoreVideo.framework; sourceTree = SDKROOT; };
@@ -106,7 +106,7 @@
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
- 42171D1415BA6C37008BEA68 /* libstdc++.dylib in Frameworks */,
+ 4249F7AB16697A0900271FD5 /* libc++.dylib in Frameworks */,
42171D1215BA6C19008BEA68 /* opencv2.framework in Frameworks */,
4275AFCC15095A2C00D74063 /* libz.dylib in Frameworks */,
42AA08581508314600B379F4 /* UIKit.framework in Frameworks */,
@@ -193,7 +193,7 @@
4253708F1508384900BFE3C0 /* CoreVideo.framework */,
42171D1115BA6C19008BEA68 /* opencv2.framework */,
4275AFCB15095A2C00D74063 /* libz.dylib */,
- 42171D1315BA6C37008BEA68 /* libstdc++.dylib */,
+ 4249F7AA16697A0900271FD5 /* libc++.dylib */,
);
name = Frameworks;
sourceTree = "<group>";
@@ -355,6 +355,8 @@
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
ARCHS = armv7;
+ CLANG_CXX_LANGUAGE_STANDARD = "c++0x";
+ CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_OBJC_ARC = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = NO;
@@ -370,10 +372,7 @@
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
- HEADER_SEARCH_PATHS = (
- "\"$(SRCROOT)/../libraries/OpenCV-2.3.1/modules/core/include\"/**",
- "\"$(SRCROOT)/../libraries/OpenCV-2.3.1/modules/imgproc/include\"/**",
- );
+ HEADER_SEARCH_PATHS = "";
IPHONEOS_DEPLOYMENT_TARGET = 5.1;
SDKROOT = iphoneos;
TARGETED_DEVICE_FAMILY = "1,2";
@@ -385,6 +384,8 @@
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
ARCHS = armv7;
+ CLANG_CXX_LANGUAGE_STANDARD = "c++0x";
+ CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_OBJC_ARC = YES;
"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
COPY_PHASE_STRIP = YES;
@@ -393,10 +394,7 @@
GCC_WARN_ABOUT_RETURN_TYPE = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
- HEADER_SEARCH_PATHS = (
- "\"$(SRCROOT)/../libraries/OpenCV-2.3.1/modules/core/include\"/**",
- "\"$(SRCROOT)/../libraries/OpenCV-2.3.1/modules/imgproc/include\"/**",
- );
+ HEADER_SEARCH_PATHS = "";
IPHONEOS_DEPLOYMENT_TARGET = 5.1;
OTHER_CFLAGS = "-DNS_BLOCK_ASSERTIONS=1";
SDKROOT = iphoneos;
View
2  opencv2.framework/Versions/A/Headers/calib3d/calib3d.hpp
@@ -91,7 +91,7 @@ enum
{
CV_ITERATIVE = 0,
CV_EPNP = 1, // F.Moreno-Noguer, V.Lepetit and P.Fua "EPnP: Efficient Perspective-n-Point Camera Pose Estimation"
- CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
+ CV_P3P = 2 // X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang; "Complete Solution Classification for the Perspective-Three-Point Problem"
};
CVAPI(int) cvFindFundamentalMat( const CvMat* points1, const CvMat* points2,
View
36 opencv2.framework/Versions/A/Headers/contrib/contrib.hpp
@@ -557,15 +557,15 @@ namespace cv
void* user_data;
};
- CV_EXPORTS int chamerMatching( Mat& img, Mat& templ,
- vector<vector<Point> >& results, vector<float>& cost,
+ CV_EXPORTS_W int chamerMatching( Mat& img, Mat& templ,
+ CV_OUT vector<vector<Point> >& results, CV_OUT vector<float>& cost,
double templScale=1, int maxMatches = 20,
double minMatchDistance = 1.0, int padX = 3,
int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
double orientationWeight = 0.5, double truncate = 20);
- class CV_EXPORTS StereoVar
+ class CV_EXPORTS_W StereoVar
{
public:
// Flags
@@ -872,7 +872,7 @@ namespace cv
// Optimization Criterion on given data in src and corresponding labels
// in labels. If 0 (or less) number of components are given, they are
// automatically determined for given data in computation.
- LDA(InputArray src, InputArray labels,
+ LDA(InputArrayOfArrays src, InputArray labels,
int num_components = 0) :
_num_components(num_components)
{
@@ -895,7 +895,7 @@ namespace cv
~LDA() {}
//! Compute the discriminants for data in src and labels.
- void compute(InputArray src, InputArray labels);
+ void compute(InputArrayOfArrays src, InputArray labels);
// Projects samples into the LDA subspace.
Mat project(InputArray src);
@@ -915,29 +915,32 @@ namespace cv
Mat _eigenvectors;
Mat _eigenvalues;
- void lda(InputArray src, InputArray labels);
+ void lda(InputArrayOfArrays src, InputArray labels);
};
- class CV_EXPORTS FaceRecognizer : public Algorithm
+ class CV_EXPORTS_W FaceRecognizer : public Algorithm
{
public:
//! virtual destructor
virtual ~FaceRecognizer() {}
// Trains a FaceRecognizer.
- virtual void train(InputArray src, InputArray labels) = 0;
+ CV_WRAP virtual void train(InputArrayOfArrays src, InputArray labels) = 0;
+
+ // Updates a FaceRecognizer.
+ CV_WRAP void update(InputArrayOfArrays src, InputArray labels);
// Gets a prediction from a FaceRecognizer.
virtual int predict(InputArray src) const = 0;
// Predicts the label and confidence for a given sample.
- virtual void predict(InputArray src, int &label, double &dist) const = 0;
+ CV_WRAP virtual void predict(InputArray src, CV_OUT int &label, CV_OUT double &confidence) const = 0;
// Serializes this object to a given filename.
- virtual void save(const string& filename) const;
+ CV_WRAP virtual void save(const string& filename) const;
// Deserializes this object from a given filename.
- virtual void load(const string& filename);
+ CV_WRAP virtual void load(const string& filename);
// Serializes this object to a given cv::FileStorage.
virtual void save(FileStorage& fs) const = 0;
@@ -947,9 +950,9 @@ namespace cv
};
- CV_EXPORTS Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
- CV_EXPORTS Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
- CV_EXPORTS Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
+ CV_EXPORTS_W Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
+ CV_EXPORTS_W Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components = 0, double threshold = DBL_MAX);
+ CV_EXPORTS_W Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius=1, int neighbors=8,
int grid_x=8, int grid_y=8, double threshold = DBL_MAX);
enum
@@ -968,14 +971,15 @@ namespace cv
COLORMAP_HOT = 11
};
- CV_EXPORTS void applyColorMap(InputArray src, OutputArray dst, int colormap);
+ CV_EXPORTS_W void applyColorMap(InputArray src, OutputArray dst, int colormap);
CV_EXPORTS bool initModule_contrib();
}
-
#include "opencv2/contrib/retina.hpp"
+#include "opencv2/contrib/openfabmap.hpp"
+
#endif
#endif
View
8 opencv2.framework/Versions/A/Headers/contrib/detection_based_tracker.hpp
@@ -34,7 +34,7 @@ class DetectionBasedTracker
bool setParameters(const Parameters& params);
const Parameters& getParameters();
-
+
typedef std::pair<cv::Rect, int> Object;
virtual void getObjects(std::vector<cv::Rect>& result) const;
virtual void getObjects(std::vector<Object>& result) const;
@@ -98,5 +98,9 @@ class DetectionBasedTracker
void detectInRegion(const cv::Mat& img, const cv::Rect& r, std::vector<cv::Rect>& detectedObjectsInRegions);
};
-#endif
+namespace cv
+{
+ using ::DetectionBasedTracker;
+} //end of cv namespace
+#endif
View
405 opencv2.framework/Versions/A/Headers/contrib/openfabmap.hpp
@@ -0,0 +1,405 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+// This file originates from the openFABMAP project:
+// [http://code.google.com/p/openfabmap/]
+//
+// For published work which uses all or part of OpenFABMAP, please cite:
+// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6224843]
+//
+// Original Algorithm by Mark Cummins and Paul Newman:
+// [http://ijr.sagepub.com/content/27/6/647.short]
+// [http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942]
+// [http://ijr.sagepub.com/content/30/9/1100.abstract]
+//
+// License Agreement
+//
+// Copyright (C) 2012 Arren Glover [aj.glover@qut.edu.au] and
+// Will Maddern [w.maddern@qut.edu.au], all rights reserved.
+//
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OPENFABMAP_H_
+#define __OPENCV_OPENFABMAP_H_
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/features2d/features2d.hpp"
+
+#include <vector>
+#include <list>
+#include <map>
+#include <set>
+#include <valarray>
+
+namespace cv {
+
+namespace of2 {
+
+using std::list;
+using std::map;
+using std::multiset;
+
+/*
+ Return data format of a FABMAP compare call
+*/
+struct CV_EXPORTS IMatch {
+
+ IMatch() :
+ queryIdx(-1), imgIdx(-1), likelihood(-DBL_MAX), match(-DBL_MAX) {
+ }
+ IMatch(int _queryIdx, int _imgIdx, double _likelihood, double _match) :
+ queryIdx(_queryIdx), imgIdx(_imgIdx), likelihood(_likelihood), match(
+ _match) {
+ }
+
+ int queryIdx; //query index
+ int imgIdx; //test index
+
+ double likelihood; //raw loglikelihood
+ double match; //normalised probability
+
+ bool operator<(const IMatch& m) const {
+ return match < m.match;
+ }
+
+};
+
+/*
+ Base FabMap class. Each FabMap method inherits from this class.
+*/
+class CV_EXPORTS FabMap {
+public:
+
+ //FabMap options
+ enum {
+ MEAN_FIELD = 1,
+ SAMPLED = 2,
+ NAIVE_BAYES = 4,
+ CHOW_LIU = 8,
+ MOTION_MODEL = 16
+ };
+
+ FabMap(const Mat& clTree, double PzGe, double PzGNe, int flags,
+ int numSamples = 0);
+ virtual ~FabMap();
+
+ //methods to add training data for sampling method
+ virtual void addTraining(const Mat& queryImgDescriptor);
+ virtual void addTraining(const vector<Mat>& queryImgDescriptors);
+
+ //methods to add to the test data
+ virtual void add(const Mat& queryImgDescriptor);
+ virtual void add(const vector<Mat>& queryImgDescriptors);
+
+ //accessors
+ const vector<Mat>& getTrainingImgDescriptors() const;
+ const vector<Mat>& getTestImgDescriptors() const;
+
+ //Main FabMap image comparison
+ void compare(const Mat& queryImgDescriptor,
+ vector<IMatch>& matches, bool addQuery = false,
+ const Mat& mask = Mat());
+ void compare(const Mat& queryImgDescriptor,
+ const Mat& testImgDescriptors, vector<IMatch>& matches,
+ const Mat& mask = Mat());
+ void compare(const Mat& queryImgDescriptor,
+ const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches, const Mat& mask = Mat());
+ void compare(const vector<Mat>& queryImgDescriptors, vector<
+ IMatch>& matches, bool addQuery = false, const Mat& mask =
+ Mat());
+ void compare(const vector<Mat>& queryImgDescriptors,
+ const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches, const Mat& mask = Mat());
+
+protected:
+
+ void compareImgDescriptor(const Mat& queryImgDescriptor,
+ int queryIndex, const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches);
+
+ void addImgDescriptor(const Mat& queryImgDescriptor);
+
+ //the getLikelihoods method is overwritten for each different FabMap
+ //method.
+ virtual void getLikelihoods(const Mat& queryImgDescriptor,
+ const vector<Mat>& testImgDescriptors,
+ vector<IMatch>& matches);
+ virtual double getNewPlaceLikelihood(const Mat& queryImgDescriptor);
+
+ //turn likelihoods into probabilities (also add in motion model if used)
+ void normaliseDistribution(vector<IMatch>& matches);
+
+ //Chow-Liu Tree
+ int pq(int q);
+ double Pzq(int q, bool zq);
+ double PzqGzpq(int q, bool zq, bool zpq);
+
+ //FAB-MAP Core
+ double PzqGeq(bool zq, bool eq);
+ double PeqGL(int q, bool Lzq, bool eq);
+ double PzqGL(int q, bool zq, bool zpq, bool Lzq);
+ double PzqGzpqL(int q, bool zq, bool zpq, bool Lzq);
+ double (FabMap::*PzGL)(int q, bool zq, bool zpq, bool Lzq);
+
+ //data
+ Mat clTree;
+ vector<Mat> trainingImgDescriptors;
+ vector<Mat> testImgDescriptors;
+ vector<IMatch> priorMatches;
+
+ //parameters
+ double PzGe;
+ double PzGNe;
+ double Pnew;
+
+ double mBias;
+ double sFactor;
+
+ int flags;
+ int numSamples;
+
+};
+
+/*
+ The original FAB-MAP algorithm, developed based on:
+ http://ijr.sagepub.com/content/27/6/647.short
+*/
+class CV_EXPORTS FabMap1: public FabMap {
+public:
+ FabMap1(const Mat& clTree, double PzGe, double PzGNe, int flags,
+ int numSamples = 0);
+ virtual ~FabMap1();
+protected:
+
+ //FabMap1 implementation of likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+};
+
+/*
+ A computationally faster version of the original FAB-MAP algorithm. A look-
+ up-table is used to precompute many of the reoccuring calculations
+*/
+class CV_EXPORTS FabMapLUT: public FabMap {
+public:
+ FabMapLUT(const Mat& clTree, double PzGe, double PzGNe,
+ int flags, int numSamples = 0, int precision = 6);
+ virtual ~FabMapLUT();
+protected:
+
+ //FabMap look-up-table implementation of the likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+
+ //precomputed data
+ int (*table)[8];
+
+ //data precision
+ int precision;
+};
+
+/*
+ The Accelerated FAB-MAP algorithm, developed based on:
+ http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=5613942
+*/
+class CV_EXPORTS FabMapFBO: public FabMap {
+public:
+ FabMapFBO(const Mat& clTree, double PzGe, double PzGNe, int flags,
+ int numSamples = 0, double rejectionThreshold = 1e-8, double PsGd =
+ 1e-8, int bisectionStart = 512, int bisectionIts = 9);
+ virtual ~FabMapFBO();
+
+protected:
+
+ //FabMap Fast Bail-out implementation of the likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+
+ //stucture used to determine word comparison order
+ struct WordStats {
+ WordStats() :
+ q(0), info(0), V(0), M(0) {
+ }
+
+ WordStats(int _q, double _info) :
+ q(_q), info(_info), V(0), M(0) {
+ }
+
+ int q;
+ double info;
+ mutable double V;
+ mutable double M;
+
+ bool operator<(const WordStats& w) const {
+ return info < w.info;
+ }
+
+ };
+
+ //private fast bail-out necessary functions
+ void setWordStatistics(const Mat& queryImgDescriptor, multiset<WordStats>& wordData);
+ double limitbisection(double v, double m);
+ double bennettInequality(double v, double m, double delta);
+ static bool compInfo(const WordStats& first, const WordStats& second);
+
+ //parameters
+ double PsGd;
+ double rejectionThreshold;
+ int bisectionStart;
+ int bisectionIts;
+};
+
+/*
+ The FAB-MAP2.0 algorithm, developed based on:
+ http://ijr.sagepub.com/content/30/9/1100.abstract
+*/
+class CV_EXPORTS FabMap2: public FabMap {
+public:
+
+ FabMap2(const Mat& clTree, double PzGe, double PzGNe, int flags);
+ virtual ~FabMap2();
+
+ //FabMap2 builds the inverted index and requires an additional training/test
+ //add function
+ void addTraining(const Mat& queryImgDescriptors) {
+ FabMap::addTraining(queryImgDescriptors);
+ }
+ void addTraining(const vector<Mat>& queryImgDescriptors);
+
+ void add(const Mat& queryImgDescriptors) {
+ FabMap::add(queryImgDescriptors);
+ }
+ void add(const vector<Mat>& queryImgDescriptors);
+
+protected:
+
+ //FabMap2 implementation of the likelihood comparison
+ void getLikelihoods(const Mat& queryImgDescriptor, const vector<
+ Mat>& testImgDescriptors, vector<IMatch>& matches);
+ double getNewPlaceLikelihood(const Mat& queryImgDescriptor);
+
+ //the likelihood function using the inverted index
+ void getIndexLikelihoods(const Mat& queryImgDescriptor, vector<
+ double>& defaults, map<int, vector<int> >& invertedMap,
+ vector<IMatch>& matches);
+ void addToIndex(const Mat& queryImgDescriptor,
+ vector<double>& defaults,
+ map<int, vector<int> >& invertedMap);
+
+ //data
+ vector<double> d1, d2, d3, d4;
+ vector<vector<int> > children;
+
+ // TODO: inverted map a vector?
+
+ vector<double> trainingDefaults;
+ map<int, vector<int> > trainingInvertedMap;
+
+ vector<double> testDefaults;
+ map<int, vector<int> > testInvertedMap;
+
+};
+/*
+ A Chow-Liu tree is required by FAB-MAP. The Chow-Liu tree provides an
+ estimate of the full distribution of visual words using a minimum spanning
+ tree. The tree is generated through training data.
+*/
+class CV_EXPORTS ChowLiuTree {
+public:
+ ChowLiuTree();
+ virtual ~ChowLiuTree();
+
+ //add data to the chow-liu tree before calling make
+ void add(const Mat& imgDescriptor);
+ void add(const vector<Mat>& imgDescriptors);
+
+ const vector<Mat>& getImgDescriptors() const;
+
+ Mat make(double infoThreshold = 0.0);
+
+private:
+ vector<Mat> imgDescriptors;
+ Mat mergedImgDescriptors;
+
+ typedef struct info {
+ float score;
+ short word1;
+ short word2;
+ } info;
+
+ //probabilities extracted from mergedImgDescriptors
+ double P(int a, bool za);
+ double JP(int a, bool za, int b, bool zb); //a & b
+ double CP(int a, bool za, int b, bool zb); // a | b
+
+ //calculating mutual information of all edges
+ void createBaseEdges(list<info>& edges, double infoThreshold);
+ double calcMutInfo(int word1, int word2);
+ static bool sortInfoScores(const info& first, const info& second);
+
+ //selecting minimum spanning egdges with maximum information
+ bool reduceEdgesToMinSpan(list<info>& edges);
+
+ //building the tree sctructure
+ Mat buildTree(int root_word, list<info> &edges);
+ void recAddToTree(Mat &cltree, int q, int pq,
+ list<info> &remaining_edges);
+ vector<int> extractChildren(list<info> &remaining_edges, int q);
+
+};
+
+/*
+ A custom vocabulary training method based on:
+ http://www.springerlink.com/content/d1h6j8x552532003/
+*/
+class CV_EXPORTS BOWMSCTrainer: public BOWTrainer {
+public:
+ BOWMSCTrainer(double clusterSize = 0.4);
+ virtual ~BOWMSCTrainer();
+
+ // Returns trained vocabulary (i.e. cluster centers).
+ virtual Mat cluster() const;
+ virtual Mat cluster(const Mat& descriptors) const;
+
+protected:
+
+ double clusterSize;
+
+};
+
+}
+
+}
+
+#endif /* OPENFABMAP_H_ */
View
368 opencv2.framework/Versions/A/Headers/contrib/retina.hpp
@@ -1,55 +1,55 @@
/*#******************************************************************************
** IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
- **
+ **
** By downloading, copying, installing or using the software you agree to this license.
** If you do not agree to this license, do not download, install,
** copy or use the software.
- **
- **
+ **
+ **
** HVStools : interfaces allowing OpenCV users to integrate Human Vision System models. Presented models originate from Jeanny Herault's original research and have been reused and adapted by the author&collaborators for computed vision applications since his thesis with Alice Caplier at Gipsa-Lab.
** Use: extract still images & image sequences features, from contours details to motion spatio-temporal features, etc. for high level visual scene analysis. Also contribute to image enhancement/compression such as tone mapping.
- **
+ **
** Maintainers : Listic lab (code author current affiliation & applications) and Gipsa Lab (original research origins & applications)
- **
+ **
** Creation - enhancement process 2007-2011
** Author: Alexandre Benoit (benoit.alexandre.vision@gmail.com), LISTIC lab, Annecy le vieux, France
- **
+ **
** Theses algorithm have been developped by Alexandre BENOIT since his thesis with Alice Caplier at Gipsa-Lab (www.gipsa-lab.inpg.fr) and the research he pursues at LISTIC Lab (www.listic.univ-savoie.fr).
** Refer to the following research paper for more information:
** Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
** This work have been carried out thanks to Jeanny Herault who's research and great discussions are the basis of all this work, please take a look at his book:
** Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891.
- **
+ **
** The retina filter includes the research contributions of phd/research collegues from which code has been redrawn by the author :
** _take a look at the retinacolor.hpp module to discover Brice Chaix de Lavarene color mosaicing/demosaicing and the reference paper:
** ====> B. Chaix de Lavarene, D. Alleysson, B. Durette, J. Herault (2007). "Efficient demosaicing through recursive filtering", IEEE International Conference on Image Processing ICIP 2007
** _take a look at imagelogpolprojection.hpp to discover retina spatial log sampling which originates from Barthelemy Durette phd with Jeanny Herault. A Retina / V1 cortex projection is also proposed and originates from Jeanny's discussions.
** ====> more informations in the above cited Jeanny Heraults's book.
- **
+ **
** License Agreement
** For Open Source Computer Vision Library
- **
+ **
** Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
** Copyright (C) 2008-2011, Willow Garage Inc., all rights reserved.
- **
+ **
** For Human Visual System tools (hvstools)
** Copyright (C) 2007-2011, LISTIC Lab, Annecy le Vieux and GIPSA Lab, Grenoble, France, all rights reserved.
- **
+ **
** Third party copyrights are property of their respective owners.
- **
+ **
** Redistribution and use in source and binary forms, with or without modification,
** are permitted provided that the following conditions are met:
- **
+ **
** * Redistributions of source code must retain the above copyright notice,
** this list of conditions and the following disclaimer.
- **
+ **
** * Redistributions in binary form must reproduce the above copyright notice,
** this list of conditions and the following disclaimer in the documentation
** and/or other materials provided with the distribution.
- **
+ **
** * The name of the copyright holders may not be used to endorse or promote products
** derived from this software without specific prior written permission.
- **
+ **
** This software is provided by the copyright holders and contributors "as is" and
** any express or implied warranties, including, but not limited to, the implied
** warranties of merchantability and fitness for a particular purpose are disclaimed.
@@ -80,9 +80,9 @@ namespace cv
enum RETINA_COLORSAMPLINGMETHOD
{
- RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice
- RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
- RETINA_COLOR_BAYER//!< standard bayer sampling
+ RETINA_COLOR_RANDOM, //!< each pixel position is either R, G or B in a random choice
+ RETINA_COLOR_DIAGONAL,//!< color sampling is RGBRGBRGB..., line 2 BRGBRGBRG..., line 3, GBRGBRGBR...
+ RETINA_COLOR_BAYER//!< standard bayer sampling
};
class RetinaFilter;
@@ -114,9 +114,9 @@ class CV_EXPORTS Retina {
public:
- // parameters structure for better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel
- struct RetinaParameters{
- struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
+ // parameters structure for better clarity, check explenations on the comments of methods : setupOPLandIPLParvoChannel and setupIPLMagnoChannel
+ struct RetinaParameters{
+ struct OPLandIplParvoParameters{ // Outer Plexiform Layer (OPL) and Inner Plexiform Layer Parvocellular (IplParvo) parameters
OPLandIplParvoParameters():colorMode(true),
normaliseOutput(true),
photoreceptorsLocalAdaptationSensitivity(0.7f),
@@ -144,166 +144,166 @@ class CV_EXPORTS Retina {
};
struct OPLandIplParvoParameters OPLandIplParvo;
struct IplMagnoParameters IplMagno;
- };
+ };
- /**
- * Main constructor with most commun use setup : create an instance of color ready retina model
- * @param inputSize : the input frame size
- */
- Retina(Size inputSize);
+ /**
+ * Main constructor with most commun use setup : create an instance of color ready retina model
+ * @param inputSize : the input frame size
+ */
+ Retina(Size inputSize);
- /**
- * Complete Retina filter constructor which allows all basic structural parameters definition
+ /**
+ * Complete Retina filter constructor which allows all basic structural parameters definition
* @param inputSize : the input frame size
- * @param colorMode : the chosen processing mode : with or without color processing
- * @param colorSamplingMethod: specifies which kind of color sampling will be used
- * @param useRetinaLogSampling: activate retina log sampling, if true, the 2 following parameters can be used
- * @param reductionFactor: only usefull if param useRetinaLogSampling=true, specifies the reduction factor of the output frame (as the center (fovea) is high resolution and corners can be underscaled, then a reduction of the output is allowed without precision leak
- * @param samplingStrenght: only usefull if param useRetinaLogSampling=true, specifies the strenght of the log scale that is applied
- */
- Retina(Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
-
- virtual ~Retina();
-
- /**
- * retreive retina input buffer size
+ * @param colorMode : the chosen processing mode : with or without color processing
+ * @param colorSamplingMethod: specifies which kind of color sampling will be used
+ * @param useRetinaLogSampling: activate retina log sampling, if true, the 2 following parameters can be used
+ * @param reductionFactor: only usefull if param useRetinaLogSampling=true, specifies the reduction factor of the output frame (as the center (fovea) is high resolution and corners can be underscaled, then a reduction of the output is allowed without precision leak
+ * @param samplingStrenght: only usefull if param useRetinaLogSampling=true, specifies the strenght of the log scale that is applied
+ */
+ Retina(Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
+
+ virtual ~Retina();
+
+ /**
+ * retreive retina input buffer size
*/
Size inputSize();
- /**
- * retreive retina output buffer size
+ /**
+ * retreive retina output buffer size
*/
Size outputSize();
- /**
- * try to open an XML retina parameters file to adjust current retina instance setup
- * => if the xml file does not exist, then default setup is applied
- * => warning, Exceptions are thrown if read XML file is not valid
- * @param retinaParameterFile : the parameters filename
+ /**
+ * try to open an XML retina parameters file to adjust current retina instance setup
+ * => if the xml file does not exist, then default setup is applied
+ * => warning, Exceptions are thrown if read XML file is not valid
+ * @param retinaParameterFile : the parameters filename
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
- */
- void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true);
-
-
- /**
- * try to open an XML retina parameters file to adjust current retina instance setup
- * => if the xml file does not exist, then default setup is applied
- * => warning, Exceptions are thrown if read XML file is not valid
- * @param fs : the open Filestorage which contains retina parameters
+ */
+ void setup(std::string retinaParameterFile="", const bool applyDefaultSetupOnFailure=true);
+
+
+ /**
+ * try to open an XML retina parameters file to adjust current retina instance setup
+ * => if the xml file does not exist, then default setup is applied
+ * => warning, Exceptions are thrown if read XML file is not valid
+ * @param fs : the open Filestorage which contains retina parameters
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
- */
+ */
void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure=true);
- /**
- * try to open an XML retina parameters file to adjust current retina instance setup
- * => if the xml file does not exist, then default setup is applied
- * => warning, Exceptions are thrown if read XML file is not valid
- * @param newParameters : a parameters structures updated with the new target configuration
+ /**
+ * try to open an XML retina parameters file to adjust current retina instance setup
+ * => if the xml file does not exist, then default setup is applied
+ * => warning, Exceptions are thrown if read XML file is not valid
+ * @param newParameters : a parameters structures updated with the new target configuration
* @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
- */
- void setup(RetinaParameters newParameters);
+ */
+ void setup(RetinaParameters newParameters);
/**
* @return the current parameters setup
*/
struct Retina::RetinaParameters getParameters();
- /**
- * parameters setup display method
- * @return a string which contains formatted parameters information
- */
- const std::string printSetup();
+ /**
+ * parameters setup display method
+ * @return a string which contains formatted parameters information
+ */
+ const std::string printSetup();
- /**
- * write xml/yml formated parameters information
- * @rparam fs : the filename of the xml file that will be open and writen with formatted parameters information
- */
- virtual void write( std::string fs ) const;
+ /**
+ * write xml/yml formated parameters information
+ * @rparam fs : the filename of the xml file that will be open and writen with formatted parameters information
+ */
+ virtual void write( std::string fs ) const;
- /**
- * write xml/yml formated parameters information
- * @param fs : a cv::Filestorage object ready to be filled
+ /**
+ * write xml/yml formated parameters information
+ * @param fs : a cv::Filestorage object ready to be filled
*/
- virtual void write( FileStorage& fs ) const;
-
- /**
- * setup the OPL and IPL parvo channels (see biologocal model)
- * OPL is referred as Outer Plexiform Layer of the retina, it allows the spatio-temporal filtering which withens the spectrum and reduces spatio-temporal noise while attenuating global luminance (low frequency energy)
- * IPL parvo is the OPL next processing stage, it refers to Inner Plexiform layer of the retina, it allows high contours sensitivity in foveal vision.
- * for more informations, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
- * @param colorMode : specifies if (true) color is processed of not (false) to then processing gray level image
- * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false)
- * @param photoreceptorsLocalAdaptationSensitivity: the photoreceptors sensitivity renage is 0-1 (more log compression effect when value increases)
- * @param photoreceptorsTemporalConstant: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame
- * @param photoreceptorsSpatialConstant: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel
- * @param horizontalCellsGain: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, then, the luminance is not filtered and is still reachable at the output, typicall value is 0
- * @param HcellsTemporalConstant: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as the photoreceptors
- * @param HcellsSpatialConstant: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixel, this value is also used for local contrast computing when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model)
- * @param ganglionCellsSensitivity: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 230
- */
- void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7, const float photoreceptorsTemporalConstant=0.5, const float photoreceptorsSpatialConstant=0.53, const float horizontalCellsGain=0, const float HcellsTemporalConstant=1, const float HcellsSpatialConstant=7, const float ganglionCellsSensitivity=0.7);
-
- /**
- * set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
- * this channel processes signals outpint from OPL processing stage in peripheral vision, it allows motion information enhancement. It is decorrelated from the details channel. See reference paper for more details.
- * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false)
- * @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0
- * @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response)
- * @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5
- * @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass fiter of the magnocellular way (motion information channel), unit is frames, tipicall value is 5
- * @param V0CompressionParameter: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 200
- * @param localAdaptintegration_tau: specifies the temporal constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation
- * @param localAdaptintegration_k: specifies the spatial constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation
- */
- void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0, const float parasolCells_tau=0, const float parasolCells_k=7, const float amacrinCellsTemporalCutFrequency=1.2, const float V0CompressionParameter=0.95, const float localAdaptintegration_tau=0, const float localAdaptintegration_k=7);
-
- /**
- * method which allows retina to be applied on an input image, after run, encapsulated retina module is ready to deliver its outputs using dedicated acccessors, see getParvo and getMagno methods
- * @param inputImage : the input cv::Mat image to be processed, can be gray level or BGR coded in any format (from 8bit to 16bits)
- */
- void run(const Mat &inputImage);
-
- /**
- * accessor of the details channel of the retina (models foveal vision)
- * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
- */
- void getParvo(Mat &retinaOutput_parvo);
-
- /**
- * accessor of the details channel of the retina (models foveal vision)
- * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
- */
- void getParvo(std::valarray<float> &retinaOutput_parvo);
-
- /**
- * accessor of the motion channel of the retina (models peripheral vision)
- * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
- */
- void getMagno(Mat &retinaOutput_magno);
-
- /**
- * accessor of the motion channel of the retina (models peripheral vision)
- * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
- */
- void getMagno(std::valarray<float> &retinaOutput_magno);
-
- // original API level data accessors : get buffers addresses...
- const std::valarray<float> & getMagno() const;
- const std::valarray<float> & getParvo() const;
-
- /**
- * activate color saturation as the final step of the color demultiplexing process
- * -> this saturation is a sigmoide function applied to each channel of the demultiplexed image.
- * @param saturateColors: boolean that activates color saturation (if true) or desactivate (if false)
- * @param colorSaturationValue: the saturation factor
- */
- void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0);
-
- /**
- * clear all retina buffers (equivalent to opening the eyes after a long period of eye close ;o)
- */
- void clearBuffers();
+ virtual void write( FileStorage& fs ) const;
+
+ /**
+ * setup the OPL and IPL parvo channels (see biologocal model)
+ * OPL is referred as Outer Plexiform Layer of the retina, it allows the spatio-temporal filtering which withens the spectrum and reduces spatio-temporal noise while attenuating global luminance (low frequency energy)
+ * IPL parvo is the OPL next processing stage, it refers to Inner Plexiform layer of the retina, it allows high contours sensitivity in foveal vision.
+ * for more informations, please have a look at the paper Benoit A., Caplier A., Durette B., Herault, J., "USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011
+ * @param colorMode : specifies if (true) color is processed of not (false) to then processing gray level image
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false)
+ * @param photoreceptorsLocalAdaptationSensitivity: the photoreceptors sensitivity renage is 0-1 (more log compression effect when value increases)
+ * @param photoreceptorsTemporalConstant: the time constant of the first order low pass filter of the photoreceptors, use it to cut high temporal frequencies (noise or fast motion), unit is frames, typical value is 1 frame
+ * @param photoreceptorsSpatialConstant: the spatial constant of the first order low pass filter of the photoreceptors, use it to cut high spatial frequencies (noise or thick contours), unit is pixels, typical value is 1 pixel
+ * @param horizontalCellsGain: gain of the horizontal cells network, if 0, then the mean value of the output is zero, if the parameter is near 1, then, the luminance is not filtered and is still reachable at the output, typicall value is 0
+ * @param HcellsTemporalConstant: the time constant of the first order low pass filter of the horizontal cells, use it to cut low temporal frequencies (local luminance variations), unit is frames, typical value is 1 frame, as the photoreceptors
+ * @param HcellsSpatialConstant: the spatial constant of the first order low pass filter of the horizontal cells, use it to cut low spatial frequencies (local luminance), unit is pixels, typical value is 5 pixel, this value is also used for local contrast computing when computing the local contrast adaptation at the ganglion cells level (Inner Plexiform Layer parvocellular channel model)
+ * @param ganglionCellsSensitivity: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 230
+ */
+ void setupOPLandIPLParvoChannel(const bool colorMode=true, const bool normaliseOutput = true, const float photoreceptorsLocalAdaptationSensitivity=0.7, const float photoreceptorsTemporalConstant=0.5, const float photoreceptorsSpatialConstant=0.53, const float horizontalCellsGain=0, const float HcellsTemporalConstant=1, const float HcellsSpatialConstant=7, const float ganglionCellsSensitivity=0.7);
+
+ /**
+ * set parameters values for the Inner Plexiform Layer (IPL) magnocellular channel
+ * this channel processes signals outpint from OPL processing stage in peripheral vision, it allows motion information enhancement. It is decorrelated from the details channel. See reference paper for more details.
+ * @param normaliseOutput : specifies if (true) output is rescaled between 0 and 255 of not (false)
+ * @param parasolCells_beta: the low pass filter gain used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), typical value is 0
+ * @param parasolCells_tau: the low pass filter time constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is frame, typical value is 0 (immediate response)
+ * @param parasolCells_k: the low pass filter spatial constant used for local contrast adaptation at the IPL level of the retina (for ganglion cells local adaptation), unit is pixels, typical value is 5
+ * @param amacrinCellsTemporalCutFrequency: the time constant of the first order high pass fiter of the magnocellular way (motion information channel), unit is frames, tipicall value is 5
+ * @param V0CompressionParameter: the compression strengh of the ganglion cells local adaptation output, set a value between 160 and 250 for best results, a high value increases more the low value sensitivity... and the output saturates faster, recommended value: 200
+ * @param localAdaptintegration_tau: specifies the temporal constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation
+ * @param localAdaptintegration_k: specifies the spatial constant of the low pas filter involved in the computation of the local "motion mean" for the local adaptation computation
+ */
+ void setupIPLMagnoChannel(const bool normaliseOutput = true, const float parasolCells_beta=0, const float parasolCells_tau=0, const float parasolCells_k=7, const float amacrinCellsTemporalCutFrequency=1.2, const float V0CompressionParameter=0.95, const float localAdaptintegration_tau=0, const float localAdaptintegration_k=7);
+
+ /**
+ * method which allows retina to be applied on an input image, after run, encapsulated retina module is ready to deliver its outputs using dedicated acccessors, see getParvo and getMagno methods
+ * @param inputImage : the input cv::Mat image to be processed, can be gray level or BGR coded in any format (from 8bit to 16bits)
+ */
+ void run(const Mat &inputImage);
+
+ /**
+ * accessor of the details channel of the retina (models foveal vision)
+ * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
+ */
+ void getParvo(Mat &retinaOutput_parvo);
+
+ /**
+ * accessor of the details channel of the retina (models foveal vision)
+ * @param retinaOutput_parvo : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
+ */
+ void getParvo(std::valarray<float> &retinaOutput_parvo);
+
+ /**
+ * accessor of the motion channel of the retina (models peripheral vision)
+ * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is rescaled for standard 8bits image processing use in OpenCV
+ */
+ void getMagno(Mat &retinaOutput_magno);
+
+ /**
+ * accessor of the motion channel of the retina (models peripheral vision)
+ * @param retinaOutput_magno : the output buffer (reallocated if necessary), this output is the original retina filter model output, without any quantification or rescaling
+ */
+ void getMagno(std::valarray<float> &retinaOutput_magno);
+
+ // original API level data accessors : get buffers addresses...
+ const std::valarray<float> & getMagno() const;
+ const std::valarray<float> & getParvo() const;
+
+ /**
+ * activate color saturation as the final step of the color demultiplexing process
+ * -> this saturation is a sigmoide function applied to each channel of the demultiplexed image.
+ * @param saturateColors: boolean that activates color saturation (if true) or desactivate (if false)
+ * @param colorSaturationValue: the saturation factor
+ */
+ void setColorSaturation(const bool saturateColors=true, const float colorSaturationValue=4.0);
+
+ /**
+ * clear all retina buffers (equivalent to opening the eyes after a long period of eye close ;o)
+ */
+ void clearBuffers();
/**
* Activate/desactivate the Magnocellular pathway processing (motion information extraction), by default, it is activated
@@ -318,35 +318,35 @@ class CV_EXPORTS Retina {
void activateContoursProcessing(const bool activate);
protected:
- // Parameteres setup members
- RetinaParameters _retinaParameters; // structure of parameters
-
+ // Parameteres setup members
+ RetinaParameters _retinaParameters; // structure of parameters
+
// Retina model related modules
- std::valarray<float> _inputBuffer; //!< buffer used to convert input cv::Mat to internal retina buffers format (valarrays)
-
- // pointer to retina model
- RetinaFilter* _retinaFilter; //!< the pointer to the retina module, allocated with instance construction
-
- /**
- * exports a valarray buffer outing from HVStools objects to a cv::Mat in CV_8UC1 (gray level picture) or CV_8UC3 (color) format
- * @param grayMatrixToConvert the valarray to export to OpenCV
- * @param nbRows : the number of rows of the valarray flatten matrix
- * @param nbColumns : the number of rows of the valarray flatten matrix
- * @param colorMode : a flag which mentions if matrix is color (true) or graylevel (false)
- * @param outBuffer : the output matrix which is reallocated to satisfy Retina output buffer dimensions
- */
- void _convertValarrayBuffer2cvMat(const std::valarray<float> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, Mat &outBuffer);
-
- /**
- *
- * @param inputMatToConvert : the OpenCV cv::Mat that has to be converted to gray or RGB valarray buffer that will be processed by the retina model
- * @param outputValarrayMatrix : the output valarray
- * @return the input image color mode (color=true, gray levels=false)
- */
+ std::valarray<float> _inputBuffer; //!< buffer used to convert input cv::Mat to internal retina buffers format (valarrays)
+
+ // pointer to retina model
+ RetinaFilter* _retinaFilter; //!< the pointer to the retina module, allocated with instance construction
+
+ /**
+ * exports a valarray buffer outing from HVStools objects to a cv::Mat in CV_8UC1 (gray level picture) or CV_8UC3 (color) format
+ * @param grayMatrixToConvert the valarray to export to OpenCV
+ * @param nbRows : the number of rows of the valarray flatten matrix
+ * @param nbColumns : the number of rows of the valarray flatten matrix
+ * @param colorMode : a flag which mentions if matrix is color (true) or graylevel (false)
+ * @param outBuffer : the output matrix which is reallocated to satisfy Retina output buffer dimensions
+ */
+ void _convertValarrayBuffer2cvMat(const std::valarray<float> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, Mat &outBuffer);
+
+ /**
+ *
+ * @param inputMatToConvert : the OpenCV cv::Mat that has to be converted to gray or RGB valarray buffer that will be processed by the retina model
+ * @param outputValarrayMatrix : the output valarray
+ * @return the input image color mode (color=true, gray levels=false)
+ */
bool _convertCvMat2ValarrayBuffer(const cv::Mat inputMatToConvert, std::valarray<float> &outputValarrayMatrix);
- //! private method called by constructors, gathers their parameters and use them in a unified way
- void _init(const Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
+ //! private method called by constructors, gathers their parameters and use them in a unified way
+ void _init(const Size inputSize, const bool colorMode, RETINA_COLORSAMPLINGMETHOD colorSamplingMethod=RETINA_COLOR_BAYER, const bool useRetinaLogSampling=false, const double reductionFactor=1.0, const double samplingStrenght=10.0);
};
View
179 opencv2.framework/Versions/A/Headers/core/core.hpp
@@ -221,7 +221,7 @@ CV_EXPORTS void setNumThreads(int nthreads);
CV_EXPORTS int getNumThreads();
CV_EXPORTS int getThreadNum();
-CV_EXPORTS_W const std::string& getBuildInformation();
+CV_EXPORTS_W const string& getBuildInformation();
//! Returns the number of ticks.
@@ -440,7 +440,7 @@ template<typename _Tp, int m, int n> class CV_EXPORTS Matx
{
public:
typedef _Tp value_type;
- typedef Matx<_Tp, MIN(m, n), 1> diag_type;
+ typedef Matx<_Tp, (m < n ? m : n), 1> diag_type;
typedef Matx<_Tp, m, n> mat_type;
enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols,
type = CV_MAKETYPE(depth, channels) };
@@ -1107,6 +1107,18 @@ template<> class DataType<double>
type = CV_MAKETYPE(depth, channels) };
};
+template<typename _Tp, int m, int n> class DataType<Matx<_Tp, m, n> >
+{
+public:
+ typedef Matx<_Tp, m, n> value_type;
+ typedef Matx<typename DataType<_Tp>::work_type, m, n> work_type;
+ typedef _Tp channel_type;
+ typedef value_type vec_type;
+ enum { generic_type = 0, depth = DataDepth<channel_type>::value, channels = m*n,
+ fmt = ((channels-1)<<8) + DataDepth<channel_type>::fmt,
+ type = CV_MAKETYPE(depth, channels) };
+};
+
template<typename _Tp, int cn> class DataType<Vec<_Tp, cn> >
{
public:
@@ -1374,6 +1386,7 @@ class CV_EXPORTS _OutputArray : public _InputArray
template<typename _Tp> _OutputArray(Mat_<_Tp>& m);
template<typename _Tp, int m, int n> _OutputArray(Matx<_Tp, m, n>& matx);
template<typename _Tp> _OutputArray(_Tp* vec, int n);
+ _OutputArray(gpu::GpuMat& d_mat);
_OutputArray(const Mat& m);
template<typename _Tp> _OutputArray(const vector<_Tp>& vec);
@@ -1383,11 +1396,13 @@ class CV_EXPORTS _OutputArray : public _InputArray
template<typename _Tp> _OutputArray(const Mat_<_Tp>& m);
template<typename _Tp, int m, int n> _OutputArray(const Matx<_Tp, m, n>& matx);
template<typename _Tp> _OutputArray(const _Tp* vec, int n);
+ _OutputArray(const gpu::GpuMat& d_mat);
virtual bool fixedSize() const;
virtual bool fixedType() const;
virtual bool needed() const;
virtual Mat& getMatRef(int i=-1) const;
+ /*virtual*/ gpu::GpuMat& getGpuMatRef() const;
virtual void create(Size sz, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
@@ -2091,6 +2106,9 @@ CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst,
CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src);
//! computes the number of nonzero array elements
CV_EXPORTS_W int countNonZero( InputArray src );
+//! returns the list of locations of non-zero pixels
+CV_EXPORTS_W void findNonZero( InputArray src, OutputArray idx );
+
//! computes mean value of selected array elements
CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray());
//! computes mean value and standard deviation of all or selected array elements
@@ -2125,13 +2143,17 @@ CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, in
//! makes multi-channel array out of several single-channel arrays
CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst);
+CV_EXPORTS void merge(const vector<Mat>& mv, OutputArray dst );
+
//! makes multi-channel array out of several single-channel arrays
-CV_EXPORTS_W void merge(const vector<Mat>& mv, OutputArray dst);
+CV_EXPORTS_W void merge(InputArrayOfArrays mv, OutputArray dst);
//! copies each plane of a multi-channel array to a dedicated array
CV_EXPORTS void split(const Mat& src, Mat* mvbegin);
+CV_EXPORTS void split(const Mat& m, vector<Mat>& mv );
+
//! copies each plane of a multi-channel array to a dedicated array
-CV_EXPORTS_W void split(const Mat& m, CV_OUT vector<Mat>& mv);
+CV_EXPORTS_W void split(InputArray m, OutputArrayOfArrays mv);
//! copies selected channels from the input arrays to the selected channels of the output arrays
CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts,
@@ -2261,10 +2283,10 @@ CV_EXPORTS_W bool solve(InputArray src1, InputArray src2,
enum
{
- SORT_EVERY_ROW=0,
- SORT_EVERY_COLUMN=1,
- SORT_ASCENDING=0,
- SORT_DESCENDING=16
+ SORT_EVERY_ROW=0,
+ SORT_EVERY_COLUMN=1,
+ SORT_ASCENDING=0,
+ SORT_DESCENDING=16
};
//! sorts independently each matrix row or each matrix column
@@ -2287,12 +2309,12 @@ CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors,
enum
{
- COVAR_SCRAMBLED=0,
- COVAR_NORMAL=1,
- COVAR_USE_AVG=2,
- COVAR_SCALE=4,
- COVAR_ROWS=8,
- COVAR_COLS=16
+ COVAR_SCRAMBLED=0,
+ COVAR_NORMAL=1,
+ COVAR_USE_AVG=2,
+ COVAR_SCALE=4,
+ COVAR_ROWS=8,
+ COVAR_COLS=16
};
//! computes covariation matrix of a set of samples
@@ -2363,8 +2385,10 @@ class CV_EXPORTS PCA
PCA();
//! the constructor that performs PCA
PCA(InputArray data, InputArray mean, int flags, int maxComponents=0);
+ PCA(InputArray data, InputArray mean, int flags, double retainedVariance);
//! operator that performs PCA. The previously stored data, if any, is released
PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0);
+ PCA& computeVar(InputArray data, InputArray mean, int flags, double retainedVariance);
//! projects vector from the original space to the principal components subspace
Mat project(InputArray vec) const;
//! projects vector from the original space to the principal components subspace
@@ -2382,6 +2406,9 @@ class CV_EXPORTS PCA
CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean,
OutputArray eigenvectors, int maxComponents=0);
+CV_EXPORTS_W void PCAComputeVar(InputArray data, CV_OUT InputOutputArray mean,
+ OutputArray eigenvectors, double retainedVariance);
+
CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean,
InputArray eigenvectors, OutputArray result);
@@ -2496,32 +2523,32 @@ CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng
CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.);
//! draws the line segment (pt1, pt2) in the image
-CV_EXPORTS_W void line(Mat& img, Point pt1, Point pt2, const Scalar& color,
+CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color,
int thickness=1, int lineType=8, int shift=0);
//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
-CV_EXPORTS_W void rectangle(Mat& img, Point pt1, Point pt2,
+CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2,
const Scalar& color, int thickness=1,
int lineType=8, int shift=0);
//! draws the rectangle outline or a solid rectangle covering rec in the image
-CV_EXPORTS void rectangle(Mat& img, Rect rec,
+CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,
const Scalar& color, int thickness=1,
int lineType=8, int shift=0);
//! draws the circle outline or a solid circle in the image
-CV_EXPORTS_W void circle(Mat& img, Point center, int radius,
+CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius,
const Scalar& color, int thickness=1,
int lineType=8, int shift=0);
//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image
-CV_EXPORTS_W void ellipse(Mat& img, Point center, Size axes,
+CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes,
double angle, double startAngle, double endAngle,
const Scalar& color, int thickness=1,
int lineType=8, int shift=0);
//! draws a rotated ellipse in the image
-CV_EXPORTS_W void ellipse(Mat& img, const RotatedRect& box, const Scalar& color,
+CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color,
int thickness=1, int lineType=8);
//! draws a filled convex polygon in the image
@@ -4314,15 +4341,24 @@ class CV_EXPORTS_W Algorithm
CV_WRAP vector<Mat> getMatVector(const string& name) const;
CV_WRAP Ptr<Algorithm> getAlgorithm(const string& name) const;
- CV_WRAP_AS(setInt) void set(const string& name, int value);
- CV_WRAP_AS(setDouble) void set(const string& name, double value);
- CV_WRAP_AS(setBool) void set(const string& name, bool value);
- CV_WRAP_AS(setString) void set(const string& name, const string& value);
- CV_WRAP_AS(setMat) void set(const string& name, const Mat& value);
- CV_WRAP_AS(setMatVector) void set(const string& name, const vector<Mat>& value);
- CV_WRAP_AS(setAlgorithm) void set(const string& name, const Ptr<Algorithm>& value);
+ void set(const string& name, int value);
+ void set(const string& name, double value);
+ void set(const string& name, bool value);
+ void set(const string& name, const string& value);
+ void set(const string& name, const Mat& value);
+ void set(const string& name, const vector<Mat>& value);
+ void set(const string& name, const Ptr<Algorithm>& value);
template<typename _Tp> void set(const string& name, const Ptr<_Tp>& value);
+ CV_WRAP void setInt(const string& name, int value);
+ CV_WRAP void setDouble(const string& name, double value);
+ CV_WRAP void setBool(const string& name, bool value);
+ CV_WRAP void setString(const string& name, const string& value);
+ CV_WRAP void setMat(const string& name, const Mat& value);
+ CV_WRAP void setMatVector(const string& name, const vector<Mat>& value);
+ CV_WRAP void setAlgorithm(const string& name, const Ptr<Algorithm>& value);
+ template<typename _Tp> void setAlgorithm(const string& name, const Ptr<_Tp>& value);
+
void set(const char* name, int value);
void set(const char* name, double value);
void set(const char* name, bool value);
@@ -4332,6 +4368,15 @@ class CV_EXPORTS_W Algorithm
void set(const char* name, const Ptr<Algorithm>& value);
template<typename _Tp> void set(const char* name, const Ptr<_Tp>& value);
+ void setInt(const char* name, int value);
+ void setDouble(const char* name, double value);
+ void setBool(const char* name, bool value);
+ void setString(const char* name, const string& value);
+ void setMat(const char* name, const Mat& value);
+ void setMatVector(const char* name, const vector<Mat>& value);
+ void setAlgorithm(const char* name, const Ptr<Algorithm>& value);
+ template<typename _Tp> void setAlgorithm(const char* name, const Ptr<_Tp>& value);
+
CV_WRAP string paramHelp(const string& name) const;
int paramType(const char* name) const;
CV_WRAP int paramType(const string& name) const;
@@ -4378,6 +4423,11 @@ class CV_EXPORTS AlgorithmInfo
void (Algorithm::*setter)(int)=0,
const string& help=string());
void addParam(Algorithm& algo, const char* name,
+ short& value, bool readOnly=false,
+ int (Algorithm::*getter)()=0,
+ void (Algorithm::*setter)(int)=0,
+ const string& help=string());
+ void addParam(Algorithm& algo, const char* name,
bool& value, bool readOnly=false,
int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(int)=0,
@@ -4426,7 +4476,7 @@ class CV_EXPORTS AlgorithmInfo
struct CV_EXPORTS Param
{
- enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6 };
+ enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, SHORT=10 };
Param();
Param(int _type, bool _readonly, int _offset,
@@ -4457,6 +4507,14 @@ template<> struct ParamType<int>
enum { type = Param::INT };
};
+template<> struct ParamType<short>
+{
+ typedef int const_param_type;
+ typedef int member_type;
+
+ enum { type = Param::SHORT };
+};
+
template<> struct ParamType<double>
{
typedef double const_param_type;
@@ -4497,6 +4555,30 @@ template<> struct ParamType<Algorithm>
enum { type = Param::ALGORITHM };
};
+template<> struct ParamType<float>
+{
+ typedef float const_param_type;
+ typedef float member_type;
+
+ enum { type = Param::FLOAT };
+};
+
+template<> struct ParamType<unsigned>
+{
+ typedef unsigned const_param_type;
+ typedef unsigned member_type;
+
+ enum { type = Param::UNSIGNED_INT };
+};
+
+template<> struct ParamType<uint64>
+{
+ typedef uint64 const_param_type;
+ typedef uint64 member_type;
+
+ enum { type = Param::UINT64 };
+};
+
/*!
"\nThe CommandLineParser class is designed for command line arguments parsing\n"
@@ -4605,6 +4687,47 @@ float CommandLineParser::analyzeValue<float>(const std::string& str, bool space_
template<> CV_EXPORTS
double CommandLineParser::analyzeValue<double>(const std::string& str, bool space_delete);
+
+/////////////////////////////// Parallel Primitives //////////////////////////////////
+
+// a base body class
+class CV_EXPORTS ParallelLoopBody
+{
+public:
+ virtual ~ParallelLoopBody();
+ virtual void operator() (const Range& range) const = 0;
+};
+
+CV_EXPORTS void parallel_for_(const Range& range, const ParallelLoopBody& body, double nstripes=-1.);
+
+/////////////////////////// Synchronization Primitives ///////////////////////////////
+
+class CV_EXPORTS Mutex
+{
+public:
+ Mutex();
+ ~Mutex();
+ Mutex(const Mutex& m);
+ Mutex& operator = (const Mutex& m);
+
+ void lock();
+ bool trylock();
+ void unlock();
+
+ struct Impl;
+protected:
+ Impl* impl;
+};
+
+class CV_EXPORTS AutoLock
+{
+public:
+ AutoLock(Mutex& m) : mutex(&m) { mutex->lock(); }
+ ~AutoLock() { mutex->unlock(); }
+protected:
+ Mutex* mutex;
+};
+
}
#endif // __cplusplus
View
185 opencv2.framework/Versions/A/Headers/core/cuda_devptrs.hpp
@@ -0,0 +1,185 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other GpuMaterials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_DEVPTRS_HPP__
+#define __OPENCV_CORE_DEVPTRS_HPP__
+
+#ifdef __cplusplus
+
+#ifdef __CUDACC__
+ #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
+#else
+ #define __CV_GPU_HOST_DEVICE__
+#endif
+
+namespace cv
+{
+ namespace gpu
+ {
+ // Simple lightweight structures that encapsulates information about an image on device.
+ // It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile
+
+ template <bool expr> struct StaticAssert;
+ template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
+
+ template<typename T> struct DevPtr
+ {
+ typedef T elem_type;
+ typedef int index_type;
+
+ enum { elem_size = sizeof(elem_type) };
+
+ T* data;
+
+ __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
+ __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
+
+ __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
+ __CV_GPU_HOST_DEVICE__ operator T*() { return data; }
+ __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
+ };
+
+ template<typename T> struct PtrSz : public DevPtr<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
+
+ size_t size;
+ };
+
+ template<typename T> struct PtrStep : public DevPtr<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
+
+ /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */
+ size_t step;
+
+ __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
+ __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
+
+ __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
+ __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+ };
+
+ template <typename T> struct PtrStepSz : public PtrStep<T>
+ {
+ __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
+ __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
+ : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
+
+ template <typename U>
+ explicit PtrStepSz(const PtrStepSz<U>& d) : PtrStep<T>((T*)d.data, d.step), cols(d.cols), rows(d.rows){}
+
+ int cols;
+ int rows;
+ };
+
+ typedef PtrStepSz<unsigned char> PtrStepSzb;
+ typedef PtrStepSz<float> PtrStepSzf;
+ typedef PtrStepSz<int> PtrStepSzi;
+
+ typedef PtrStep<unsigned char> PtrStepb;
+ typedef PtrStep<float> PtrStepf;
+ typedef PtrStep<int> PtrStepi;
+
+
+#if defined __GNUC__
+ #define __CV_GPU_DEPR_BEFORE__
+ #define __CV_GPU_DEPR_AFTER__ __attribute__ ((deprecated))
+#elif defined(__MSVC__) //|| defined(__CUDACC__)
+ #pragma deprecated(DevMem2D_)
+ #define __CV_GPU_DEPR_BEFORE__ __declspec(deprecated)
+ #define __CV_GPU_DEPR_AFTER__
+#else
+ #define __CV_GPU_DEPR_BEFORE__
+ #define __CV_GPU_DEPR_AFTER__
+#endif
+
+ template <typename T> struct __CV_GPU_DEPR_BEFORE__ DevMem2D_ : public PtrStepSz<T>
+ {
+ DevMem2D_() {}
+ DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
+
+ template <typename U>
+ explicit __CV_GPU_DEPR_BEFORE__ DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
+ } __CV_GPU_DEPR_AFTER__ ;
+
+ typedef DevMem2D_<unsigned char> DevMem2Db;
+ typedef DevMem2Db DevMem2D;
+ typedef DevMem2D_<float> DevMem2Df;
+ typedef DevMem2D_<int> DevMem2Di;
+
+ template<typename T> struct PtrElemStep_ : public PtrStep<T>
+ {
+ PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step)
+ {
+ StaticAssert<256 % sizeof(T) == 0>::check();
+
+ PtrStep<T>::step /= PtrStep<T>::elem_size;
+ }
+ __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }
+ __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; }
+
+ __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
+ __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
+ };
+
+ template<typename T> struct PtrStep_ : public PtrStep<T>
+ {
+ PtrStep_() {}
+ PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {}
+ };
+
+ typedef PtrElemStep_<unsigned char> PtrElemStep;
+ typedef PtrElemStep_<float> PtrElemStepf;
+ typedef PtrElemStep_<int> PtrElemStepi;
+
+//#undef __CV_GPU_DEPR_BEFORE__
+//#undef __CV_GPU_DEPR_AFTER__
+
+ }
+}
+
+#endif // __cplusplus
+
+#endif /* __OPENCV_CORE_DEVPTRS_HPP__ */
View
204 opencv2.framework/Versions/A/Headers/core/devmem2d.hpp
@@ -1,161 +1,43 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other GpuMaterials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_CORE_DevMem2D_HPP__
-#define __OPENCV_CORE_DevMem2D_HPP__
-
-#ifdef __cplusplus
-
-#ifdef __CUDACC__
- #define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
-#else
- #define __CV_GPU_HOST_DEVICE__
-#endif
-
-namespace cv
-{
- namespace gpu
- {
- // Simple lightweight structures that encapsulates information about an image on device.
- // It is intended to pass to nvcc-compiled code. GpuMat depends on headers that nvcc can't compile
-
- template <bool expr> struct StaticAssert;
- template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
-
- template<typename T> struct DevPtr
- {
- typedef T elem_type;
- typedef int index_type;
-
- enum { elem_size = sizeof(elem_type) };
-
- T* data;
-
- __CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
- __CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
-
- __CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
- __CV_GPU_HOST_DEVICE__ operator T*() { return data; }
- __CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
- };
-
- template<typename T> struct PtrSz : public DevPtr<T>
- {
- __CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
- __CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
-
- size_t size;
- };
-
- template<typename T> struct PtrStep : public DevPtr<T>
- {
- __CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
- __CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
-
- /** \brief stride between two consecutive rows in bytes. Step is stored always and everywhere in bytes!!! */
- size_t step;
-
- __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
- __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
-
- __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
- __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
- };
-
- template <typename T> struct PtrStepSz : public PtrStep<T>
- {
- __CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
- __CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
- : PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
-
- int cols;
- int rows;
- };
-
- template <typename T> struct DevMem2D_ : public PtrStepSz<T>
- {
- DevMem2D_() {}
- DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
-
- template <typename U>
- explicit DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
- };
-
- template<typename T> struct PtrElemStep_ : public PtrStep<T>
- {
- PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step)
- {
- StaticAssert<256 % sizeof(T) == 0>::check();
-
- PtrStep<T>::step /= PtrStep<T>::elem_size;
- }
- __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }
- __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; }
-
- __CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
- __CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
- };
-
- template<typename T> struct PtrStep_ : public PtrStep<T>
- {
- PtrStep_() {}
- PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {}
- };
-
- typedef DevMem2D_<unsigned char> DevMem2Db;
- typedef DevMem2Db DevMem2D;
- typedef DevMem2D_<float> DevMem2Df;
- typedef DevMem2D_<int> DevMem2Di;
-
- typedef PtrStep<unsigned char> PtrStepb;
- typedef PtrStep<float> PtrStepf;
- typedef PtrStep<int> PtrStepi;
-
- typedef PtrElemStep_<unsigned char> PtrElemStep;
- typedef PtrElemStep_<float> PtrElemStepf;
- typedef PtrElemStep_<int> PtrElemStepi;
- }
-}
-
-#endif // __cplusplus
-
-#endif /* __OPENCV_GPU_DevMem2D_HPP__ */
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other GpuMaterials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "opencv2/core/cuda_devptrs.hpp"
View
98 opencv2.framework/Versions/A/Headers/core/eigen.hpp
@@ -73,7 +73,7 @@ void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCo
_src.copyTo(dst);
}
}
-
+
template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols>
void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )
@@ -103,6 +103,27 @@ void cv2eigen( const Mat& src,
}
}
+// Matx case
+template<typename _Tp, int _rows, int _cols, int _options, int _maxRows, int _maxCols>
+void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
+ Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst )
+{
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(_cols, _rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(_rows, _cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ Mat(src).copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
template<typename _Tp>
void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )
@@ -132,14 +153,35 @@ void cv2eigen( const Mat& src,
}
}
-
+// Matx case
+template<typename _Tp, int _rows, int _cols>
+void cv2eigen( const Matx<_Tp, _rows, _cols>& src,
+ Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst )
+{
+ dst.resize(_rows, _cols);
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(_cols, _rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(_rows, _cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ Mat(src).copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
template<typename _Tp>
void cv2eigen( const Mat& src,
Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )
{
CV_Assert(src.cols == 1);
dst.resize(src.rows);
-
+
if( !(dst.Flags & Eigen::RowMajorBit) )
{
Mat _dst(src.cols, src.rows, DataType<_Tp>::type,
@@ -159,6 +201,29 @@ void cv2eigen( const Mat& src,
}
}
+// Matx case
+template<typename _Tp, int _rows>
+void cv2eigen( const Matx<_Tp, _rows, 1>& src,
+ Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst )
+{
+ dst.resize(_rows);
+
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(1, _rows, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(_rows, 1, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ src.copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
template<typename _Tp>
void cv2eigen( const Mat& src,
@@ -183,8 +248,31 @@ void cv2eigen( const Mat& src,
src.convertTo(_dst, _dst.type());
CV_DbgAssert(_dst.data == (uchar*)dst.data());
}
-}
-
+}
+
+//Matx
+template<typename _Tp, int _cols>
+void cv2eigen( const Matx<_Tp, 1, _cols>& src,
+ Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst )
+{
+ dst.resize(_cols);
+ if( !(dst.Flags & Eigen::RowMajorBit) )
+ {
+ Mat _dst(_cols, 1, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ transpose(src, _dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+ else
+ {
+ Mat _dst(1, _cols, DataType<_Tp>::type,
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp)));
+ Mat(src).copyTo(_dst);
+ CV_DbgAssert(_dst.data == (uchar*)dst.data());
+ }
+}
+
+
}
#endif
View
1,142 opencv2.framework/Versions/A/Headers/core/gpumat.hpp
@@ -1,565 +1,577 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other GpuMaterials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_GPUMAT_HPP__
-#define __OPENCV_GPUMAT_HPP__
-
-#ifdef __cplusplus
-
-#include "opencv2/core/core.hpp"
-#include "opencv2/core/devmem2d.hpp"
-
-namespace cv { namespace gpu
-{
- //////////////////////////////// Initialization & Info ////////////////////////
-
- //! This is the only function that do not throw exceptions if the library is compiled without Cuda.
- CV_EXPORTS int getCudaEnabledDeviceCount();
-
- //! Functions below throw cv::Expception if the library is compiled without Cuda.
-
- CV_EXPORTS void setDevice(int device);
- CV_EXPORTS int getDevice();
-
- //! Explicitly destroys and cleans up all resources associated with the current device in the current process.
- //! Any subsequent API call to this device will reinitialize the device.
- CV_EXPORTS void resetDevice();
-
- enum FeatureSet
- {
- FEATURE_SET_COMPUTE_10 = 10,
- FEATURE_SET_COMPUTE_11 = 11,
- FEATURE_SET_COMPUTE_12 = 12,
- FEATURE_SET_COMPUTE_13 = 13,
- FEATURE_SET_COMPUTE_20 = 20,
- FEATURE_SET_COMPUTE_21 = 21,
- GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
- SHARED_ATOMICS = FEATURE_SET_COMPUTE_12,
- NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13
- };
-
- // Gives information about what GPU archs this OpenCV GPU module was
- // compiled for
- class CV_EXPORTS TargetArchs
- {
- public:
- static bool builtWith(FeatureSet feature_set);
- static bool has(int major, int minor);
- static bool hasPtx(int major, int minor);
- static bool hasBin(int major, int minor);
- static bool hasEqualOrLessPtx(int major, int minor);
- static bool hasEqualOrGreater(int major, int minor);
- static bool hasEqualOrGreaterPtx(int major, int minor);
- static bool hasEqualOrGreaterBin(int major, int minor);
- private:
- TargetArchs();
- };
-
- // Gives information about the given GPU
- class CV_EXPORTS DeviceInfo
- {
- public:
- // Creates DeviceInfo object for the current GPU
- DeviceInfo() : device_id_(getDevice()) { query(); }
-
- // Creates DeviceInfo object for the given GPU
- DeviceInfo(int device_id) : device_id_(device_id) { query(); }
-
- std::string name() const { return name_; }
-
- // Return compute capability versions
- int majorVersion() const { return majorVersion_; }
- int minorVersion() const { return minorVersion_; }
-
- int multiProcessorCount() const { return multi_processor_count_; }
-
- size_t freeMemory() const;
- size_t totalMemory() const;
-
- // Checks whether device supports the given feature
- bool supports(FeatureSet feature_set) const;
-
- // Checks whether the GPU module can be run on the given device
- bool isCompatible() const;
-
- int deviceID() const { return device_id_; }
-
- private:
- void query();
- void queryMemory(size_t& free_memory, size_t& total_memory) const;
-
- int device_id_;
-
- std::string name_;
- int multi_processor_count_;
- int majorVersion_;
- int minorVersion_;
- };
-
- CV_EXPORTS void printCudaDeviceInfo(int device);
- CV_EXPORTS void printShortCudaDeviceInfo(int device);
-
- //////////////////////////////// GpuMat ///////////////////////////////
-
- //! Smart pointer for GPU memory with reference counting. Its interface is mostly similar with cv::Mat.
- class CV_EXPORTS GpuMat
- {
- public:
- //! default constructor
- GpuMat();
-
- //! constructs GpuMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
- GpuMat(int rows, int cols, int type);
- GpuMat(Size size, int type);
-
- //! constucts GpuMatrix and fills it with the specified value _s.
- GpuMat(int rows, int cols, int type, Scalar s);
- GpuMat(Size size, int type, Scalar s);
-
- //! copy constructor
- GpuMat(const GpuMat& m);
-
- //! constructor for GpuMatrix headers pointing to user-allocated data
- GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
- GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);
-
- //! creates a matrix header for a part of the bigger matrix
- GpuMat(const GpuMat& m, Range rowRange, Range colRange);
- GpuMat(const GpuMat& m, Rect roi);
-
- //! builds GpuMat from Mat. Perfom blocking upload to device.
- explicit GpuMat(const Mat& m);
-
- //! destructor - calls release()
- ~GpuMat();
-
- //! assignment operators