diff --git a/CMakeLists.txt b/CMakeLists.txt
index eebb7e1a..feef30f7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 2.8.11)
+cmake_minimum_required(VERSION 2.8.12)
include(CheckSymbolExists)
include(GNUInstallDirs) # populate CMAKE_INSTALL_{LIB,BIN}DIR
@@ -467,6 +467,15 @@ if (BUILD_TESTS)
add_subdirectory(utest)
endif()
+# ========================= Python module =============================
+option(BUILD_PYTHON_MODULE "Build the python module for libpointmatcher" OFF)
+option(USE_SYSTEM_PYBIND11 "Use the system installed pybind11 rather than using a git submodule" ON)
+set(PYTHON_INSTALL_TARGET "" CACHE PATH "Target where to install the python module")
+
+if (BUILD_PYTHON_MODULE)
+ add_subdirectory(python)
+endif()
+
#=================== allow find_package() =========================
#
# the following case be used in an external project requiring libpointmatcher:
diff --git a/doc/CompilationPython.md b/doc/CompilationPython.md
new file mode 100644
index 00000000..89ee1182
--- /dev/null
+++ b/doc/CompilationPython.md
@@ -0,0 +1,179 @@
+| [Tutorials Home](index.md) | [Previous](UnitTestDev.md) | [Next](PythonModule.md) |
+| :--- | :---: | ---: |
+
+# Compiling libpointmatcher with Python
+
+This tutorial presents the different steps of compiling *pypointmatcher*, the libpointmatcher's Python module, on Ubuntu and Mac OS X.
+
+## Prerequisites
+
+To get started, you will need the same prerequisites as libpointmatcher, but also some additional dependencies as listed here:
+
+| Name | Version <br> (Tested August 2020 on Ubuntu 18.04) |
+| :--- | :---: |
+| pybind11 | 2.5.0 |
+| Python3 | 3.6.9 |
+|||
+| **Dependencies** | |
+| python3-dev | 3.6.7 |
+| catch | 1.10.0 |
+| pytest | 5.4.3 |
+
+`pytest` needs to be installed with `pip`:
+
+```bash
+pip3 install pytest
+```
+
+But `catch` and `python3-dev` need to be installed with a package manager:
+
+*Ubuntu users:*
+
+```bash
+sudo apt install catch python3-dev
+```
+
+*Mac OS users*:
+
+```bash
+brew install catch2
+```
+
+The rest of this tutorial will guide you through the necessary steps to compile pypointmatcher.
+
+## pybind11
+
+pybind11 is a library used to create Python bindings of existing C++ code and vice versa. So, in order to be able to compile pypointmatcher, you must either install pybind11 on your system or add it as a git submodule in the libpointmatcher's `contrib/` directory. You must then create a symbolic link to this git submodule in the `python/` directory. Go [here](#installing-pybind11) for the installation steps or [here](#adding-pybind11) for the git submodule steps.
+
+### Installing pybind11 (recommended)
+
+The very first step is to clone [pybind11](https://github.com/pybind/pybind11) into a directory of your choice.
+
+At the moment, pypointmatcher can only be compiled with **version 2.5.0** of pybind11. To install the right version, run the following commands:
+
+```bash
+cd pybind11
+git checkout v2.5.0
+```
+
+Once this is done, run the following commands:
+
+```bash
+mkdir build && cd build
+cmake ..
+make check -j 4
+```
+
+This will both compile and run pybind11 tests. Next, you can install pybind11 by running this command:
+
+```bash
+sudo make install
+```
+
+Once this is done, return to libpointmatcher's `build/` directory.
+
+You're now ready to proceed to the [configuration step](#configuration).
+
+### Adding pybind11 as a `git` submodule
+
+An alternative to installing pybind11 on your system is to add its repository as a git submodule and create a symbolic link into the `python/` directory. To do this, you will first need to clone the repository as a git submodule by running the following commands in your terminal from the `contrib/` directory.
+
+```bash
+cd contrib
+git submodule add https://github.com/pybind/pybind11.git
+```
+
+This will add pybind11 as a git submodule of libpointmatcher into the `contrib/` directory. Then, still from the `contrib/` directory, run this command to create a symbolic link to pybind11 in the `python/` directory:
+
+```bash
+ln -sr pybind11 ../python/pybind11
+```
+
+At the moment, pypointmatcher can only be compiled with **version 2.5.0** of pybind11. To install the right version, run the following commands:
+
+```bash
+cd pybind11
+git checkout v2.5.0
+```
+
+Finally, tell CMake that you want to use pybind11 as a git submodule by setting the `USE_SYSTEM_PYBIND11` variable to `OFF`:
+
+```bash
+cmake -D USE_SYSTEM_PYBIND11=OFF ..
+```
+
+> ***IMPORTANT:*** When this method is used, it is very important to checkout the version **2.5.0** of pybind11 or it will be impossible to generate the build files.
+
+Once this is done, return to libpointmatcher's `build/` directory.
+
+You're now ready to proceed to the [configuration step](#configuration).
+
+## Configuring the variables
+
+> ***Note:*** *It is recommended to create a virtual environment before proceeding with the next steps. For this, you can use the [virtualenv tool](https://virtualenv.pypa.io/en/stable/). If you are not familiar with Python virtual environments, you can [read this tutorial](https://realpython.com/python-virtual-environments-a-primer/), which explains very well the reasons for using a virtual environment, or [watch this video tutorial](https://youtu.be/nnhjvHYRsmM)*
+
+#### Specifying the path
+
+First, you need to specify where you want the module to be installed. To do so, you must provide the path by setting the CMake variable `PYTHON_INSTALL_TARGET` with an absolute path to your Python environment `site-packages` location. This can be achieved manually or automatically.
+
+##### The manual way:
+
+Launch the Python interpreter and run the following commands to find the path to the `site-packages/` directory of your current Python environment:
+
+```bash
+>>> import site
+>>> site.getsitepackages()
+```
+
+> ***Note:*** If you are using the system's Python environment, replace the `getsitepackages()` function call by `getusersitepackages()`.
+
+This will output a list of installation paths for your current Python environment. Now, choose the one that is located in the `python_env_path/lib/python3.x/` directory. The command to run should look like this:
+
+```bash
+cmake -D PYTHON_INSTALL_TARGET=python_env_path/lib/python3.x/site-packages ..
+```
+
+> ***NOTE:*** Replace the `x` with your Python minor version number.
+
+##### The automatic way:
+
+If you don't want to set the path manually, here's a command that should automatically pick the right one for you:
+
+```bash
+cmake -D PYTHON_INSTALL_TARGET=$(python3 -c "import site; print(site.getsitepackages()[0])") ..
+```
+
+> ***Note:*** If you are using the system's Python environment, replace the `site.getsitepackages()[0]` by `site.getusersitepackages()`.
+
+> ***IMPORTANT:*** *This last example is the default behavior if no path has been set before compiling the module.* ***Please, make sure that this corresponds to a valid location or the module will be installed in a wrong location and this will lead to an import error.***
+
+#### Enabling the compilation
+
+By default, pypointmatcher compilation is disabled. In order to compile it, you must set the CMake variable `BUILD_PYTHON_MODULE` to `ON`:
+
+```bash
+cmake -D BUILD_PYTHON_MODULE=ON ..
+```
+
+Everything is now set up to proceed to the compilation and the installation.
+
+## Compilation
+
+Now, to compile pypointmatcher into the `build/` directory, run the following command:
+
+```bash
+make pypointmatcher -j N
+```
+
+where `N` is the number of jobs (or threads) you allow at once on your computer for the compilation. If no argument is passed after `-j`, there will be no limit to the number of jobs.
+
+> ***Note:*** *Depending on your system, the compilation can take quite some time, so consider leaving the `-j` command with no argument in order to speed up this step.*
+
+## Installation
+
+And finally, to install the module on your system, run the following command:
+
+```bash
+sudo make install
+```
+
diff --git a/doc/PythonModule.md b/doc/PythonModule.md
new file mode 100644
index 00000000..11c64416
--- /dev/null
+++ b/doc/PythonModule.md
@@ -0,0 +1,67 @@
+| [Tutorials Home](index.md) | [Previous](CompilationPython.md) |
+| :--- | ---: |
+
+# Using libpointmatcher with Python
+
+This tutorial presents the different things to know before using *pypointmatcher*, the libpointmatcher's Python module.
+
+## Differences between the C++ and Python APIs
+
+Despite the fact that pypointmatcher and libpointmatcher have very similar APIs, the fact remains that they differ in some ways. So, why not start by listing these differences.
+
+#### STL containers vs Python data structures
+
+pybind11 provides automatic conversion between C++'s STL containers and their Python equivalent data structures. That is, `std::vector`/`std::deque`/`std::list`/`std::array` are converted into a Python `list`, `std::set`/`std::unordered_set` are converted into a Python `set` and finally `std::map`/`std::unordered_map` are converted into a Python `dict`.
+
+Although this can be very useful, it comes with some major downsides, [which you can see here](https://pybind11.readthedocs.io/en/latest/advanced/cast/stl.html#automatic-conversion), hence the need to make the classes inheriting from STL containers and some `typedef` *"opaque"*. For the moment, only the `std::vector` and the `std::map` containers are made *"opaque"*, i.e. they keep the same name as the one used in libpointmatcher, but they adopt the behavior of a `list` and a `dict` respectively.
+
+> ***Note:*** This also means that these opaque STL containers must be used in constructors and methods that accept one of these types as parameter.
+
+For more information about pybind11 STL containers conversion, visit [this section](https://pybind11.readthedocs.io/en/latest/advanced/cast/stl.html) of the official documentation.
+
+#### Eigen vs Numpy
+
+pybind11 also provides transparent conversion between *Eigen*'s `Matrix`, `Map` and `SparseMatrix` and *numpy*'s `array` and `ndarray` data types. That is, you can seamlessly use *numpy*'s `ndarray` instead of *Eigen*'s `Vector` or `Matrix`.
+
+For more information about pybind11 *Eigen* to *numpy* data type conversion, visit [this section](https://pybind11.readthedocs.io/en/latest/advanced/cast/eigen.html) of the official documentation.
+
+#### Overloaded methods based on constness
+
+In libpointmatcher, more precisely in the `DataPoints` class, some methods are overloaded based on constness, i.e. they will be called with a constant `DataPoints`. So, to avoid ambiguous calls, the suffix `_const` has been appended to the method names. E.g. in the `compute_overlap.py` example, the `getDescriptorViewByName("inliers")` method was calling the `const` version before this fix. For more information on pybind11 overloaded method mechanisms, visit [this section](https://pybind11.readthedocs.io/en/latest/classes.html#overloaded-methods) of the official documentation.
+
+#### Constructors/methods with std::istream or std::ostream as parameter
+
+Some constructors and methods of libpointmatcher have as parameter either an `std::istream` to build an object from a YAML configuration file or an `std::ostream` to dump information. pybind11 doesn't allow calling these constructors/methods with their Python equivalents, i.e. `sys.stdin` and `sys.stdout`. So, to get around this problem, the constructors/methods having an `std::istream` as parameter must be used with a `std::string` instead and those having an `std::ostream` must be used without a parameter.
+
+## Structure of pypointmatcher
+
+Before going further, here is the general structure of pypointmatcher to give you a better idea of how to use it.
+
+```textmate
+pypointmatcher # The main module.
+|
+|_ pointmatcher # The submodule containing functions and classes that are dependent on scalar types.
+|
+|_ pointmatchersupport # The submodule containing everything defined in the
+| # pointmatchersupport namespace, i.e. functions and classes which are not dependent on scalar types.
+|
+|_ datapointsfilters # The submodule containing the different DataPointsFilters.
+|
+|_ errorminimizers # The submodule containing the different ErrorMinimizers.
+```
+
+## General use of pypointmatcher
+
+To help you get familiar with pypointmatcher, some C++ examples have been translated to show you how it is possible to use the Python version of libpointmatcher.
+
+Now that you know the major differences between the C++ and Python API, we suggest newcomers and those who are less familiar with the library to follow, or re-follow, the tutorials in the beginner and advanced sections using the Python examples instead, in order to have a better understanding of how libpointmatcher can be used with Python.
+
+Experienced users of libpointmatcher can, if they wish, take a look at the different Python examples located in the `examples/python/` directory and compare their uses with their C++ counterparts, keeping in mind what makes them different from each other.
+
+The major difference is that they don't require command line arguments like the C++ version. All you have to do is open a terminal and go to the `examples/python/` directory and run one of the examples as a Python module script:
+
+```bash
+python3 icp_simple.py
+```
+
+> ***Note:*** All the information to run a default ICP algorithm is already in the examples, it is up to the user to change these values in order to run a custom ICP algorithm. So take the time to understand what the code does in the examples before making changes.
diff --git a/doc/UnitTestDev.md b/doc/UnitTestDev.md
index bb4a81ad..ad19d697 100644
--- a/doc/UnitTestDev.md
+++ b/doc/UnitTestDev.md
@@ -1,5 +1,5 @@
-| [Tutorials Home](index.md) | [Previous](TransformationDev.md) |
-| :--- | ---: |
+| [Tutorials Home](index.md) | [Previous](TransformationDev.md) | [Next](CompilationPython.md) |
+| :--- | :---: | ---: |
# Testing libpointmatcher Modules
diff --git a/doc/index.md b/doc/index.md
index 90147a9f..35486269 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -4,15 +4,15 @@
# Tutorials
-This page lists the available tutorials for libpointmatcher. The [Beginner Section](#beginner) is aimed at the more casual user and contains high-level information on the various steps of point cloud registration. The [Advanced Section](#advanced) is targeted at those with existing experience with point cloud registration and proficiency in C++ development. Those who wish to contribute to libpointmatcher can follow the guidelines in the [Developer](#developer) section.
+This page lists the available tutorials for libpointmatcher. The [Beginner](#beginner) section is aimed at the more casual user and contains high-level information on the various steps of point cloud registration. The [Advanced](#advanced) section is targeted at those with existing experience with point cloud registration and proficiency in C++ development. Those who wish to contribute to libpointmatcher can follow the guidelines in the [Developer](#developer) section. Finally, the [Python](#python) section is intended for those who are interested in using libpointmatcher with *Python*.
-## Compilation
+## Compilation
- [Ubuntu: How to compile libpointmatcher](CompilationUbuntu.md)
- [Mac OS X: How to compile libpointmatcher](CompilationMac.md)
- [Windows: How to compile libpointmatcher](CompilationWindows.md)
-## Beginner
+## Beginner
- [What is libpointmatcher about?](Introduction.md)
- [What can I do with libpointmatcher?](ApplicationsAndPub.md)
@@ -24,7 +24,7 @@ This page lists the available tutorials for libpointmatcher. The [Beginner Secti
- [Configuring libpointmatcher using YAML](Configuration.md)
- [Supported file types and importing/exporting point clouds](ImportExport.md)
-## Advanced
+## Advanced
- [How to link a project to libpointmatcher?](LinkingProjects.md)
- [How to use libpointmatcher in ROS?](UsingInRos.md)
@@ -39,12 +39,17 @@ This page lists the available tutorials for libpointmatcher. The [Beginner Secti
- How to do a nearest neighbor search between two point clouds without an ICP object? [See the comments here.](https://github.com/ethz-asl/libpointmatcher/issues/193#issuecomment-276093785)
- How to construct a `DataPoints` from my own point cloud? [See the unit test on `Datapoints` here.](https://github.com/ethz-asl/libpointmatcher/blob/master/utest/ui/DataFilters.cpp#L52)
-## Developer
+## Developer
- [Creating a DataPointsFilter](DataPointsFilterDev.md)
- [Creating a Transformation](TransformationDev.md)
- [Creating unit tests](UnitTestDev.md)
+## Python
+
+- [Compiling libpointmatcher with Python](CompilationPython.md)
+- [Using libpointmatcher with Python](PythonModule.md)
+
**Note**: if you don't find what you need, don't hesitate to propose or participate to new tutorials.
---
diff --git a/examples/python/align_sequence.py b/examples/python/align_sequence.py
new file mode 100644
index 00000000..ec569cee
--- /dev/null
+++ b/examples/python/align_sequence.py
@@ -0,0 +1,97 @@
+# Code example for ICP taking a sequence of point clouds relatively close
+# and build a map with them.
+# It assumes that: 3D point clouds are used, they were recorded in sequence
+# and they are expressed in the sensor frame.
+
+import numpy as np
+
+from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms
+
+PM = pm.PointMatcher
+PMIO = pm.PointMatcherIO
+DP = PM.DataPoints
+params = pms.Parametrizable.Parameters()
+
+# Path of output directory (default: tests/align_sequence/)
+# The output directory must already exist
+# Leave empty to save in the current directory
+output_base_directory = "tests/align_sequence/"
+
+# Name of output files (default: align_sequence)
+output_file_name = "align_sequence"
+
+# Rigid transformation
+rigid_trans = PM.get().TransformationRegistrar.create("RigidTransformation")
+
+# Create filters manually to clean the global map
+params["knn"] = "10"
+params["epsilon"] = "5"
+params["keepNormals"] = "0"
+params["keepDensities"] = "1"
+density_filter = PM.get().DataPointsFilterRegistrar.create("SurfaceNormalDataPointsFilter", params)
+params.clear()
+
+params["maxDensity"] = "30"
+max_density_subsample = PM.get().DataPointsFilterRegistrar.create("MaxDensityDataPointsFilter",
+ params)
+params.clear()
+
+# Main algorithm definition
+icp = PM.ICP()
+
+# load YAML config
+config_file = "../data/default.yaml"
+pms.validateFile(config_file)
+icp.loadFromYaml(config_file)
+
+# Loading the list of files
+# file_info_list = PMIO.FileInfoVector("../data/carCloudList.csv", "../data/")
+# or
+file_info_list = PMIO.FileInfoVector("../data/cloudList.csv", "../data/")
+
+map_point_cloud = DP()
+new_cloud = DP()
+
+T_to_map_from_new = np.identity(4) # assumes 3D
+
+for i in range(len(file_info_list)):
+ print(f"---------------------\nLoading: {file_info_list[i].readingFileName}")
+
+    # It is assumed that the point cloud is expressed in the sensor frame
+ new_cloud = DP.load(file_info_list[i].readingFileName)
+
+ if map_point_cloud.getNbPoints() == 0:
+ map_point_cloud = new_cloud
+ continue
+
+ # call ICP
+ try:
+ # We use the last transformation as a prior
+ # this assumes that the point clouds were recorded in
+ # sequence.
+ prior = T_to_map_from_new
+
+ T_to_map_from_new = icp(new_cloud, map_point_cloud, prior)
+
+ except PM.ConvergenceError as CE:
+ print(f"ERROR PM.ICP failed to converge: \n\t{CE}\n\n")
+ continue
+
+ # This is not necessary in this example, but could be
+ # useful if the same matrix is composed in the loop.
+ T_to_map_from_new = rigid_trans.correctParameters(T_to_map_from_new)
+
+ # Move the new point cloud in the map reference
+ new_cloud = rigid_trans.compute(new_cloud, T_to_map_from_new)
+
+ # Merge point clouds to map
+ map_point_cloud.concatenate(new_cloud)
+
+ # Clean the map
+ map_point_cloud = density_filter.filter(map_point_cloud)
+ map_point_cloud = max_density_subsample.filter(map_point_cloud)
+
+ # Save the map at each iteration
+ output_file_name_iter = f"{output_file_name}_{i}.vtk"
+ print(f"outputFileName: {output_file_name_iter}")
+ map_point_cloud.save(f"{output_base_directory}{output_file_name_iter}")
diff --git a/examples/python/build_map.py b/examples/python/build_map.py
new file mode 100644
index 00000000..7101b66a
--- /dev/null
+++ b/examples/python/build_map.py
@@ -0,0 +1,139 @@
+# Code example for DataFilter taking a sequence of point clouds with
+# their global coordinates and build a map with a fixed (manageable) number of points.
+# The example shows how to generate filters in the source code.
+# For an example generating filters using yaml configuration, see demo_cmake/convert.cpp
+# For an example with a registration solution, see icp.cpp
+
+import numpy as np
+
+from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms
+
+PM = pm.PointMatcher
+PMIO = pm.PointMatcherIO
+DP = PM.DataPoints
+params = pms.Parametrizable.Parameters()
+
+# Loading the list of files
+file_info_list = PMIO.FileInfoVector("../data/carCloudList.csv", "../data/")
+total_point_count = 30000
+
+# Path of output directory (default: tests/build_map/)
+# The output directory must already exist
+# Leave empty to save in the current directory
+output_base_directory = "tests/build_map/"
+
+# Name of output file: file_name.{vtk,csv,ply} (default: test.vtk)
+output_file_name = "test.vtk"
+
+pms.setLogger(PM.get().LoggerRegistrar.create("FileLogger"))
+
+map_cloud = DP()
+last_cloud = DP()
+new_cloud = DP()
+
+T = np.identity(4)
+
+# Define transformation chain
+transformation = PM.get().TransformationRegistrar.create("RigidTransformation")
+
+# This filter will remove a sphere of 1 m radius. Easy way to remove the sensor self-scanning.
+params["minDist"] = "1.0"
+remove_scanner = PM.get().DataPointsFilterRegistrar.create("MinDistDataPointsFilter", params)
+params.clear()
+
+# This filter will randomly remove 35% of the points.
+params["prob"] = "0.65"
+rand_subsample = PM.get().DataPointsFilterRegistrar.create("RandomSamplingDataPointsFilter", params)
+params.clear()
+
+# For a complete description of filter, see
+# https://github.com/ethz-asl/libpointmatcher/blob/master/doc/Datafilters.md
+params["knn"] = "10"
+params["epsilon"] = "5"
+params["keepNormals"] = "1"
+params["keepDensities"] = "0"
+normal_filter = PM.get().DataPointsFilterRegistrar.create("SurfaceNormalDataPointsFilter", params)
+params.clear()
+
+params["knn"] = "10"
+params["epsilon"] = "5"
+params["keepDensities"] = "1"
+params["keepNormals"] = "0"
+density_filter = PM.get().DataPointsFilterRegistrar.create("SurfaceNormalDataPointsFilter", params)
+params.clear()
+
+observation_direction_filter = PM.get().DataPointsFilterRegistrar.create("ObservationDirectionDataPointsFilter")
+
+params["towardCenter"] = "1"
+orien_normal_filter = PM.get().DataPointsFilterRegistrar.create("OrientNormalsDataPointsFilter", params)
+params.clear()
+
+params["maxDensity"] = "30"
+uniform_subsample = PM.get().DataPointsFilterRegistrar.create("MaxDensityDataPointsFilter", params)
+params.clear()
+
+shadow_filter = PM.get().DataPointsFilterRegistrar.create("ShadowDataPointsFilter")
+
+for i in range(len(file_info_list)):
+ print("\n-----------------------------")
+ print(f"Loading {file_info_list[i].readingFileName} ", end="")
+
+ new_cloud = DP.load(file_info_list[i].readingFileName)
+
+ print(f"found {new_cloud.getNbPoints()} points.")
+
+ if file_info_list[i].groundTruthTransformation.shape[0] != 0:
+ T = file_info_list[i].groundTruthTransformation
+ else:
+ print("ERROR: the field gTXX (ground truth) is required")
+ exit()
+
+ # Remove the scanner
+ new_cloud = remove_scanner.filter(new_cloud)
+
+ # Accelerate the process and dissolve lines
+ new_cloud = rand_subsample.filter(new_cloud)
+
+ # Build filter to remove shadow points and down-sample
+ new_cloud = normal_filter.filter(new_cloud)
+ new_cloud = observation_direction_filter.filter(new_cloud)
+ new_cloud = orien_normal_filter.filter(new_cloud)
+ new_cloud = shadow_filter.filter(new_cloud)
+
+    # Transform the point cloud
+ print(f"Transformation matrix:\n{T}\n".replace("[", " ").replace("]", " "), end="")
+ new_cloud = transformation.compute(new_cloud, T)
+
+ if i == 0:
+ map_cloud = new_cloud
+ else:
+ map_cloud.concatenate(new_cloud)
+
+ # Control point cloud size
+ prob_to_keep = total_point_count / map_cloud.features.shape[1]
+
+ if prob_to_keep < 1:
+ map_cloud = density_filter.filter(map_cloud)
+ map_cloud = uniform_subsample.filter(map_cloud)
+
+ prob_to_keep = total_point_count / map_cloud.features.shape[1]
+
+ if prob_to_keep < 1:
+ print(f"Randomly keep {prob_to_keep * 100}% points")
+
+ rand_subsample = PM.get().DataPointsFilterRegistrar.create(
+ "RandomSamplingDataPointsFilter",
+ {"prob": f"{prob_to_keep}"})
+
+ map_cloud = rand_subsample.filter(map_cloud)
+
+ map_cloud.save(f"{output_base_directory + output_file_name[:-4]}_{i}.vtk")
+
+map_cloud = density_filter.filter(map_cloud)
+map_cloud = uniform_subsample.filter(map_cloud)
+map_cloud = density_filter.filter(map_cloud)
+
+print("\n-----------------------------"*2)
+print(f"Final number of points in the map: {map_cloud.getNbPoints()}")
+
+map_cloud.save(f"{output_base_directory + output_file_name}")
diff --git a/examples/python/compute_overlap.py b/examples/python/compute_overlap.py
new file mode 100644
index 00000000..50eeb8d4
--- /dev/null
+++ b/examples/python/compute_overlap.py
@@ -0,0 +1,183 @@
+import numpy as np
+
+from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms
+
+# Code example for DataFilter taking a sequence of point clouds with
+# their global coordinates and build a map with a fixed (manageable) number of points
+
+PM = pm.PointMatcher
+PMIO = pm.PointMatcherIO
+DP = PM.DataPoints
+Matches = PM.Matches
+params = pms.Parametrizable.Parameters()
+
+# Path of output directory (default: tests/compute_overlap/)
+# The output directory must already exist
+# Leave empty to save in the current directory
+output_base_directory = "tests/compute_overlap/"
+
+# Name of output files (default: test)
+output_base_file = "test"
+
+# Loading the list of files
+file_info_list = PMIO.FileInfoVector("../data/carCloudList.csv", "../data/")
+# or
+# file_info_vec = PMIO.FileInfoVector("../data/cloudList.csv", "../data/")
+
+# If True, it will compute the overlap of only 2 point cloud ids
+# and dump VTK files for visual inspection
+debug_mode = False
+
+# Choose the first point cloud to compute the overlap and debug.
+# Must be different than debug_J
+debug_I = 1
+
+# Choose the second point cloud to compute the overlap and debug
+# Must be different than debug_I
+debug_J = 0
+
+if debug_mode:
+ pms.setLogger(PM.get().LoggerRegistrar.create("FileLogger"))
+
+# Prepare transformation chain for maps
+rigid_trans = PM.get().TransformationRegistrar.create("RigidTransformation")
+
+transformations = PM.Transformations()
+transformations.append(rigid_trans)
+
+Tread = np.identity(4)
+Tref = np.identity(4)
+
+starting_I = 0
+list_size_I = len(file_info_list)
+list_size_J = len(file_info_list)
+
+overlap_results = np.ones((list_size_J, list_size_I), np.float64)
+
+if debug_mode:
+ starting_I = debug_I
+ list_size_I = starting_I + 1
+
+for i in range(starting_I, list_size_I):
+ starting_J = i + 1
+
+ if debug_mode:
+ starting_J = debug_J
+ list_size_J = starting_J + 1
+
+ for j in range(starting_J, list_size_J):
+ # Load point clouds
+ reading = DP.load(file_info_list[i].readingFileName)
+ reference = DP.load(file_info_list[j].readingFileName)
+
+ print("Point cloud loaded")
+
+ # Load transformation matrices
+ if file_info_list[i].groundTruthTransformation.shape[0] != 0:
+ Tread = file_info_list[i].groundTruthTransformation
+ Tref = file_info_list[j].groundTruthTransformation
+ else:
+ print("ERROR: fields gTXX (i.e., ground truth matrix) is required")
+ exit()
+
+ # Move point cloud in global frame
+ transformations.apply(reading, Tread)
+ transformations.apply(reference, Tref)
+
+ # Prepare filters
+ params["prob"] = "0.5"
+ sub_sample = PM.get().DataPointsFilterRegistrar.create("RandomSamplingDataPointsFilter",
+ params)
+ params.clear()
+
+ max_density = PM.get().DataPointsFilterRegistrar.create("MaxDensityDataPointsFilter")
+
+ # params["dim"] = "1"
+ # params["minDist"] = "0"
+ # cut_in_half = PM.get().DataPointsFilterRegistrar.create("MinDistDataPointsFilter",
+ # params)
+ # params.clear()
+
+ params["knn"] = "20"
+ params["keepDensities"] = "1"
+ compute_density = PM.get().DataPointsFilterRegistrar.create("SurfaceNormalDataPointsFilter",
+ params)
+ params.clear()
+
+ reading = sub_sample.filter(reading)
+ reading = compute_density.filter(reading)
+ reading = max_density.filter(reading)
+ # reading = cut_in_half.filter(reading)
+ inliers_read = np.zeros((1, reading.features.shape[1]))
+ reading.addDescriptor("inliers", inliers_read)
+
+ reference = sub_sample.filter(reference)
+ reference = compute_density.filter(reference)
+ reference = max_density.filter(reference)
+ inliers_ref = np.zeros((1, reference.features.shape[1]))
+ reference.addDescriptor("inliers", inliers_ref)
+
+ self = reading
+ target = reference
+
+ for l in range(2):
+ self_pts_count = self.features.shape[1]
+ target_pts_count = target.features.shape[1]
+
+ # Build kd-tree
+ knn = 20
+ knn_all = 50
+
+ params["knn"] = str(knn)
+ matcher_self = PM.get().MatcherRegistrar.create("KDTreeMatcher", params)
+ params.clear()
+
+ params["knn"] = str(knn_all)
+ params["maxDistField"] = "maxSearchDist"
+ matcher_target = PM.get().MatcherRegistrar.create("KDTreeVarDistMatcher", params)
+ params.clear()
+
+ matcher_self.init(self)
+ matcher_target.init(target)
+
+ self_matches = Matches(knn, self_pts_count)
+ self_matches = matcher_self.findClosests(self)
+
+ max_search_dist = np.sqrt(self_matches.dists.max(axis=0, keepdims=True), order='F')
+ self.addDescriptor("maxSearchDist", max_search_dist)
+
+ target_matches = Matches(knn_all, target_pts_count)
+ target_matches = matcher_target.findClosests(self)
+
+ inlier_self = self.getDescriptorViewByName("inliers")
+ inlier_target = target.getDescriptorViewByName("inliers")
+
+ for m in range(self_pts_count):
+ for n in range(knn_all):
+                if target_matches.dists[n, m] != np.inf:
+ inlier_self[0, m] = 1.0
+ inlier_target[0, target_matches.ids[n, m]] = 1.0
+
+ PM.get().swapDataPoints(self, target)
+
+ final_inlier_self = self.getDescriptorViewByName("inliers")
+ final_inlier_target = target.getDescriptorViewByName("inliers")
+ self_ratio = np.count_nonzero(final_inlier_self) / final_inlier_self.shape[1]
+ target_ratio = np.count_nonzero(final_inlier_target) / final_inlier_target.shape[1]
+
+ print(f"{i} -> {j}: {self_ratio:.6}")
+ print(f"{j} -> {i}: {target_ratio:.6}")
+
+ overlap_results[j, i] = self_ratio
+ overlap_results[i, j] = target_ratio
+
+ if debug_mode:
+ self.save(f"{output_base_directory + output_base_file}_scan_i.vtk")
+ target.save(f"{output_base_directory + output_base_file}_scan_j.vtk")
+
+with open(f"{output_base_directory}overlap_results.csv", 'w') as out_file:
+ for x in range(overlap_results.shape[0]):
+ for y in range(overlap_results.shape[1]):
+ out_file.write(f"{overlap_results[x, y]:.6}, ")
+
+ out_file.write('\n')
diff --git a/examples/python/icp.py b/examples/python/icp.py
new file mode 100644
index 00000000..eb29d01d
--- /dev/null
+++ b/examples/python/icp.py
@@ -0,0 +1,117 @@
+# Code example for ICP taking 2 point clouds (2D or 3D) relatively close
+# and computing the transformation between them.
+#
+# This code is more complete than icp_simple. It can load parameter files and
+# has more options.
+
+import numpy as np
+
+from pypointmatcher import pointmatcher as pm
+from utils import parse_translation, parse_rotation
+
+PM = pm.PointMatcher
+DP = PM.DataPoints
+
+# Save transformation matrix in three different files:
+# - BASEFILENAME_init_transfo.txt
+# - BASEFILENAME_icp_transfo.txt
+# - BASEFILENAME_complete_transfo.txt
+# (default: false)
+is_transfo_saved = False
+
+# Be more verbose (info logging to the console)
+is_verbose = True
+
+# Load the config from a YAML file (default: default.yaml)
+# Leave empty to set the ICP default configuration
+config_file = "../data/default.yaml"
+
+# Path of output directory (default: tests/icp/)
+# The output directory must already exist
+# Leave empty to save in the current directory
+output_base_directory = "tests/icp/"
+
+# Name of output files (default: test)
+output_base_file = "test"
+
+# Toggle to switch between 2D and 3D point clouds
+is_3D = True
+
+# Add an initial 3D translation before applying ICP (default: 0,0,0)
+# Add an initial 2D translation before applying ICP (default: 0,0)
+init_translation = "0,0,0" if is_3D else "0,0"
+# Add an initial 3D rotation before applying ICP (default: 1,0,0;0,1,0;0,0,1)
+# Add an initial 2D rotation before applying ICP (default: 1,0;0,1)
+init_rotation = "1,0,0;0,1,0;0,0,1" if is_3D else "1,0;0,1"
+
+if is_3D:
+ # Load 3D point clouds
+ ref = DP(DP.load('../data/car_cloud400.csv'))
+ data = DP(DP.load('../data/car_cloud401.csv'))
+ test_base = "3D"
+else:
+ # Load 2D point clouds
+ ref = DP(DP.load('../data/2D_twoBoxes.csv'))
+ data = DP(DP.load('../data/2D_oneBox.csv'))
+ test_base = "2D"
+
+# Create the default ICP algorithm
+icp = PM.ICP()
+
+if len(config_file) == 0:
+ # See the implementation of setDefault() to create a custom ICP algorithm
+ icp.setDefault()
+else:
+ # load YAML config
+ icp.loadFromYaml(config_file)
+
+cloud_dimension = ref.getEuclideanDim()
+
+assert cloud_dimension == 2 or cloud_dimension == 3, "Invalid input point clouds dimension"
+
+# Parse the translation and rotation to be used to compute the initial transformation
+translation = parse_translation(init_translation, cloud_dimension)
+rotation = parse_rotation(init_rotation, cloud_dimension)
+
+init_transfo = np.matmul(translation, rotation)
+
+rigid_trans = PM.get().TransformationRegistrar.create("RigidTransformation")
+
+if not rigid_trans.checkParameters(init_transfo):
+ print("Initial transformation is not rigid, identity will be used")
+ init_transfo = np.identity(cloud_dimension + 1)
+
+initialized_data = rigid_trans.compute(data, init_transfo)
+
+# Compute the transformation to express data in ref
+T = icp(initialized_data, ref)
+
+if is_verbose:
+ print(f"match ratio: {icp.errorMinimizer.getWeightedPointUsedRatio():.6}")
+
+# Transform data to express it in ref
+data_out = DP(initialized_data)
+icp.transformations.apply(data_out, T)
+
+# Save files to see the results
+ref.save(f"{output_base_directory + test_base}_{output_base_file}_ref.vtk")
+data.save(f"{output_base_directory + test_base}_{output_base_file}_data_in.vtk")
+data_out.save(f"{output_base_directory + test_base}_{output_base_file}_data_out.vtk")
+
+if is_transfo_saved:
+ init_file_name = f"{output_base_directory + test_base}_{output_base_file}_init_transfo.txt"
+ icp_file_name = f"{output_base_directory + test_base}_{output_base_file}_icp.transfo.txt"
+ complete_file_name = f"{output_base_directory + test_base}_{output_base_file}_complete_transfo.txt"
+
+ with open(init_file_name, "w") as f:
+ f.write(f"{init_transfo}".replace("[", " ").replace("]", " "))
+
+ with open(icp_file_name, "w") as f:
+ f.write(f"{T}".replace("[", " ").replace("]", " "))
+
+ with open(complete_file_name, "w") as f:
+ f.write(f"{np.matmul(T, init_transfo)}".replace("[", " ").replace("]", " "))
+
+else:
+ if is_verbose:
+ print(f"{test_base} ICP transformation:\n{T}".replace("[", " ").replace("]", " "))
diff --git a/examples/python/icp_advance_api.py b/examples/python/icp_advance_api.py
new file mode 100644
index 00000000..5c7acacf
--- /dev/null
+++ b/examples/python/icp_advance_api.py
@@ -0,0 +1,187 @@
+# Code example for ICP taking 2 point clouds (2D or 3D) relatively close
+# and computing the transformation between them.
+#
+# This code is more complete than icp_simple. It can load parameter files and has more options.
+
+import numpy as np
+
+from math import sqrt
+from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms
+from utils import parse_translation, parse_rotation
+
+PM = pm.PointMatcher
+DP = PM.DataPoints
+Parameters = pms.Parametrizable.Parameters
+
+# Save transformation matrix in three different files:
+# - BASEFILENAME_init_transfo.txt
+# - BASEFILENAME_icp_transfo.txt
+# - BASEFILENAME_complete_transfo.txt
+# (default: false)
+is_transfo_saved = False
+
+# Be more verbose (info logging to the console)
+is_verbose = True
+
+# Load the config from a YAML file (default: default.yaml)
+config_file = "../data/default.yaml"
+
+# Path of output directory (default: tests/icp_advance_api/)
+# The output directory must already exist
+# Leave empty to save in the current directory
+output_base_directory = "tests/icp_advance_api/"
+
+# Name of output files (default: test)
+output_base_file = "test"
+
+# Toggle to switch between 2D and 3D clouds
+is_3D = True
+
+# Add an initial 3D translation before applying ICP (default: 0,0,0)
+# Add an initial 2D translation before applying ICP (default: 0,0)
+init_translation = "0,0,0" if is_3D else "0,0"
+# Add an initial 3D rotation before applying ICP (default: 1,0,0;0,1,0;0,0,1)
+# Add an initial 2D rotation before applying ICP (default: 1,0;0,1)
+init_rotation = "1,0,0;0,1,0;0,0,1" if is_3D else "1,0;0,1"
+
+if is_3D:
+ # 3D point clouds
+ ref = DP.load('../data/car_cloud400.csv')
+ data = DP.load('../data/car_cloud401.csv')
+ test_base = "3D"
+else:
+ # 2D point clouds
+ ref = DP.load('../data/2D_twoBoxes.csv')
+ data = DP.load('../data/2D_oneBox.csv')
+ test_base = "2D"
+
+# Create the default ICP algorithm
+icp = PM.ICP()
+
+if len(config_file) == 0:
+ # See the implementation of setDefault() to create a custom ICP algorithm
+ icp.setDefault()
+else:
+ # load YAML config
+ icp.loadFromYaml(config_file)
+
+cloud_dimension = ref.getEuclideanDim()
+
+assert cloud_dimension == 2 or cloud_dimension == 3, "Invalid input point clouds dimension"
+
+# Parse the translation and rotation to be used to compute the initial transformation
+translation = parse_translation(init_translation, cloud_dimension)
+rotation = parse_rotation(init_rotation, cloud_dimension)
+
+init_transfo = np.matmul(translation, rotation)
+
+rigid_trans = PM.get().TransformationRegistrar.create("RigidTransformation")
+
+if not rigid_trans.checkParameters(init_transfo):
+ print("Initial transformation is not rigid, identity will be used")
+ init_transfo = np.identity(cloud_dimension + 1)
+
+initialized_data = rigid_trans.compute(data, init_transfo)
+
+# Compute the transformation to express data in ref
+T = icp(initialized_data, ref)
+match_ratio = icp.errorMinimizer.getWeightedPointUsedRatio()
+print(f"match ratio: {match_ratio:.6}")
+
+# Transform data to express it in ref
+data_out = DP(initialized_data)
+icp.transformations.apply(data_out, T)
+
+print("\n------------------")
+
+# START demo 1
+# Test for retrieving Haussdorff distance (with outliers). We generate a new matching module
+# specifically for this purpose.
+#
+# INPUTS:
+# ref: point cloud used as reference
+# data_out: aligned point cloud (using the transformation outputted by icp)
+# icp: icp object used to align the point clouds
+
+params = Parameters()
+
+params["knn"] = "1" # for Hausdorff distance, we only need the first closest point
+params["epsilon"] = "0"
+matcher_Hausdorff = PM.get().MatcherRegistrar.create("KDTreeMatcher", params)
+
+# max. distance from reading to reference
+matcher_Hausdorff.init(ref)
+matches = matcher_Hausdorff.findClosests(data_out)
+max_dist1 = matches.getDistsQuantile(1.0)
+max_dist_robust1 = matches.getDistsQuantile(0.85)
+
+# max. distance from reference to reading
+matcher_Hausdorff.init(data_out)
+matches = matcher_Hausdorff.findClosests(ref)
+max_dist2 = matches.getDistsQuantile(1.0)
+max_dist_robust2 = matches.getDistsQuantile(0.85)
+
+haussdorff_dist = max(max_dist1, max_dist2)
+haussdorff_quantile_dist = max(max_dist_robust1, max_dist_robust2)
+
+print(f"Haussdorff distance: {sqrt(haussdorff_dist):.6} m")
+print(f"Haussdorff quantile distance: {sqrt(haussdorff_quantile_dist):.6} m")
+
+# START demo 2
+# Test for retrieving paired point mean distance without outliers.
+# We reuse the same module used for the icp object.
+#
+# INPUTS:
+# ref: point cloud used as reference
+# data_out: aligned point cloud (using the transformation outputted by icp)
+# icp: icp object used to align the point clouds
+
+# initiate the matching with unfiltered point cloud
+icp.matcher.init(ref)
+
+# extract closest points
+matches = icp.matcher.findClosests(data_out)
+
+# weight paired points
+outlier_weights = icp.outlierFilters.compute(data_out, ref, matches)
+
+# generate tuples of matched points and remove pairs with zero weight
+matched_points = PM.ErrorMinimizer.ErrorElements(data_out, ref, outlier_weights, matches)
+
+# extract relevant information for convenience
+dim = matched_points.reading.getEuclideanDim()
+nb_matched_points = matched_points.reading.getNbPoints()
+matched_read = matched_points.reading.features[:dim]
+matched_ref = matched_points.reference.features[:dim]
+
+# compute mean distance
+dist = np.linalg.norm(matched_read - matched_ref, axis=0)
+mean_dist = dist.sum() / nb_matched_points
+print(f"Robust mean distance: {mean_dist:.6} m")
+
+# End demo
+
+print("------------------\n")
+
+# Save files to see the results
+ref.save(f"{output_base_directory + test_base}_{output_base_file}_ref.vtk")
+data.save(f"{output_base_directory + test_base}_{output_base_file}_data_in.vtk")
+data_out.save(f"{output_base_directory + test_base}_{output_base_file}_data_out.vtk")
+
+if is_transfo_saved:
+ init_file_name = f"{output_base_directory + test_base}_{output_base_file}_init_transfo.txt"
+ icp_file_name = f"{output_base_directory + test_base}_{output_base_file}_icp.transfo.txt"
+ complete_file_name = f"{output_base_directory + test_base}_{output_base_file}_complete_transfo.txt"
+
+ with open(init_file_name, "w") as f:
+ f.write(f"{init_transfo}".replace("[", " ").replace("]", " "))
+
+ with open(icp_file_name, "w") as f:
+ f.write(f"{T}".replace("[", " ").replace("]", " "))
+
+ with open(complete_file_name, "w") as f:
+ f.write(f"{np.matmul(T, init_transfo)}".replace("[", " ").replace("]", " "))
+
+else:
+ if is_verbose:
+ print(f"{test_base} ICP transformation:\n{T}".replace("[", " ").replace("]", " "))
diff --git a/examples/python/icp_customized.py b/examples/python/icp_customized.py
new file mode 100644
index 00000000..a9c2afbd
--- /dev/null
+++ b/examples/python/icp_customized.py
@@ -0,0 +1,149 @@
+# Code example for ICP taking 2 point clouds (2D or 3D) relatively close
+# and computing the transformation between them.
+#
+# Instead of using a yaml file for configuration, we configure the solution
+# directly in the code.
+#
+# This code replicates the solution in /evaluations/official_solutions/Besl92_pt2point.yaml
+
+from pypointmatcher import pointmatcher as pm, pointmatchersupport as pms
+
+PM = pm.PointMatcher
+DP = PM.DataPoints
+Parameters = pms.Parametrizable.Parameters
+
+# Path of output directory (default: tests/icp_customized/)
+# The output directory must already exist
+# Leave empty to save in the current directory
+output_base_directory = "tests/icp_customized/"
+
+# Name of output files (default: test)
+output_base_file = "test"
+
+# Toggle to switch between 2D and 3D clouds
+is_3D = True
+
+if is_3D:
+ # Load 3D point clouds
+ ref = DP(DP.load('../data/car_cloud400.csv'))
+ data = DP(DP.load('../data/car_cloud401.csv'))
+ test_base = "3D"
+else:
+ # Load 2D point clouds
+ ref = DP(DP.load('../data/2D_twoBoxes.csv'))
+ data = DP(DP.load('../data/2D_oneBox.csv'))
+ test_base = "2D"
+
+# Create the default ICP algorithm
+icp = PM.ICP()
+params = Parameters()
+
+# Comment out to stop console outputs
+pms.setLogger(PM.get().LoggerRegistrar.create("FileLogger"))
+
+# Prepare reading filters
+name = "MinDistDataPointsFilter"
+params["minDist"] = "1.0"
+minDist_read = PM.get().DataPointsFilterRegistrar.create(name, params)
+params.clear()
+
+name = "RandomSamplingDataPointsFilter"
+params["prob"] = "0.05"
+rand_read = PM.get().DataPointsFilterRegistrar.create(name, params)
+params.clear()
+
+# Prepare reference filters
+name = "MinDistDataPointsFilter"
+params["minDist"] = "1.0"
+minDist_ref = PM.get().DataPointsFilterRegistrar.create(name, params)
+params.clear()
+
+name = "RandomSamplingDataPointsFilter"
+params["prob"] = "0.05"
+rand_ref = PM.get().DataPointsFilterRegistrar.create(name, params)
+params.clear()
+
+# Prepare matching function
+name = "KDTreeMatcher"
+params["knn"] = "1"
+params["epsilon"] = "3.16"
+kdtree = PM.get().MatcherRegistrar.create(name, params)
+params.clear()
+
+# Prepare outlier filters
+name = "TrimmedDistOutlierFilter"
+params["ratio"] = "0.75"
+trim = PM.get().OutlierFilterRegistrar.create(name, params)
+params.clear()
+
+# Prepare error minimization
+name = "PointToPointErrorMinimizer"
+pointToPoint = PM.get().ErrorMinimizerRegistrar.create(name)
+params.clear()
+
+# Prepare transformation checker filters
+name = "CounterTransformationChecker"
+params["maxIterationCount"] = "150"
+maxIter = PM.get().TransformationCheckerRegistrar.create(name, params)
+params.clear()
+
+name = "DifferentialTransformationChecker"
+params["minDiffRotErr"] = "0.001"
+params["minDiffTransErr"] = "0.01"
+params["smoothLength"] = "4"
+diff = PM.get().TransformationCheckerRegistrar.create(name, params)
+params.clear()
+
+# Prepare inspector
+# Comment out to write vtk files per iteration
+name = "NullInspector"
+nullInspect = PM.get().InspectorRegistrar.create(name)
+
+# Uncomment to write vtk files per iteration
+# name = "VTKFileInspector"
+# params["dumpDataLinks"] = "1"
+# params["dumpReading"] = "1"
+# params["dumpReference"] = "1"
+# vtkInspect = PM.get().InspectorRegistrar.create(name, params)
+# params.clear()
+
+# Prepare transformation
+name = "RigidTransformation"
+rigid_trans = PM.get().TransformationRegistrar.create(name)
+
+# Build ICP solution
+icp.readingDataPointsFilters.append(minDist_read)
+icp.readingDataPointsFilters.append(rand_read)
+
+icp.referenceDataPointsFilters.append(minDist_ref)
+icp.referenceDataPointsFilters.append(rand_ref)
+
+icp.matcher = kdtree
+
+icp.outlierFilters.append(trim)
+
+icp.errorMinimizer = pointToPoint
+
+icp.transformationCheckers.append(maxIter)
+icp.transformationCheckers.append(diff)
+
+# Toggle to write vtk files per iteration
+icp.inspector = nullInspect
+# icp.inspector = vtkInspect
+
+icp.transformations.append(rigid_trans)
+
+# Compute the transformation to express data in ref
+T = icp(data, ref)
+
+# Transform data to express it in ref
+data_out = DP(data)
+
+icp.transformations.apply(data_out, T)
+
+# Save files to see the results
+ref.save(f"{output_base_directory + test_base}_{output_base_file}_ref.vtk")
+data.save(f"{output_base_directory + test_base}_{output_base_file}_data_in.vtk")
+data_out.save(f"{output_base_directory + test_base}_{output_base_file}_data_out.vtk")
+
+print(f"{test_base} ICP transformation:\n{T}".replace("[", " ").replace("]", " "))
diff --git a/examples/python/icp_simple.py b/examples/python/icp_simple.py
new file mode 100644
index 00000000..b68367dd
--- /dev/null
+++ b/examples/python/icp_simple.py
@@ -0,0 +1,49 @@
+# Code example for ICP taking 2 point clouds (2D or 3D) relatively close
+# and computing the transformation between them.
+
+from pypointmatcher import pointmatcher as pm
+
+PM = pm.PointMatcher
+DP = PM.DataPoints
+
+# Path of output directory (default: tests/icp_simple/)
+# The output directory must already exist
+# Leave empty to save in the current directory
+output_base_directory = "tests/icp_simple/"
+
+# Name of output files (default: test)
+output_base_file = "test"
+
+# Toggle to switch between 2D and 3D clouds
+is_3D = True
+
+if is_3D:
+ # Load 3D point clouds
+ ref = DP(DP.load('../data/car_cloud400.csv'))
+ data = DP(DP.load('../data/car_cloud401.csv'))
+ test_base = "3D"
+else:
+ # Load 2D point clouds
+ ref = DP(DP.load('../data/2D_twoBoxes.csv'))
+ data = DP(DP.load('../data/2D_oneBox.csv'))
+ test_base = "2D"
+
+# Create the default ICP algorithm
+icp = PM.ICP()
+
+# See the implementation of setDefault() to create a custom ICP algorithm
+icp.setDefault()
+
+# Compute the transformation to express data in ref
+T = icp(data, ref)
+
+# Transform data to express it in ref
+data_out = DP(data)
+icp.transformations.apply(data_out, T)
+
+# Save files to see the results
+ref.save(f"{output_base_directory + test_base}_{output_base_file}_ref.vtk")
+data.save(f"{output_base_directory + test_base}_{output_base_file}_data_in.vtk")
+data_out.save(f"{output_base_directory + test_base}_{output_base_file}_data_out.vtk")
+
+print(f"Final {test_base} transformations:\n{T}\n".replace("[", " ").replace("]", " "))
diff --git a/examples/python/utils.py b/examples/python/utils.py
new file mode 100644
index 00000000..99cf294c
--- /dev/null
+++ b/examples/python/utils.py
@@ -0,0 +1,33 @@
+import numpy as np
+
+
+def list_modules():
+ # TODO
+ pass
+
+
+def parse_translation(p_translation: str, p_cloud_dim: int):
+ parsed_translation = np.identity(p_cloud_dim + 1)
+
+ p_translation = p_translation.replace(',', ' ')
+
+ translation_values = np.fromiter(p_translation.split(' '), float)
+
+ for i, v in enumerate(translation_values):
+ parsed_translation[i, p_cloud_dim] = v
+
+ return parsed_translation
+
+
+def parse_rotation(p_rotation: str, p_cloud_dim: int):
+ parsed_rotation = np.identity(p_cloud_dim + 1)
+
+ p_rotation = p_rotation.replace(',', ' ')
+ p_rotation = p_rotation.replace(';', ' ')
+
+ rotation_matrix = np.fromiter(p_rotation.split(' '), float)
+
+ for i, v in enumerate(rotation_matrix):
+ parsed_rotation[i // p_cloud_dim, i % p_cloud_dim] = v
+
+ return parsed_rotation
diff --git a/mkdocs.yml b/mkdocs.yml
index a8990fbf..102745ee 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -39,3 +39,6 @@ nav:
- 'Creating a DataPointsFilter': 'DataPointsFilterDev.md'
- 'Creating a Transformation': 'TransformationDev.md'
- 'Creating unit tests': 'UnitTestDev.md'
+- Python:
+ - 'Compiling libpointmatcher with Python': 'CompilationPython.md'
+ - 'Using libpointmatcher with Python': 'PythonModule.md'
diff --git a/pointmatcher/IO.cpp b/pointmatcher/IO.cpp
index a67ccd49..42103f55 100644
--- a/pointmatcher/IO.cpp
+++ b/pointmatcher/IO.cpp
@@ -1780,9 +1780,9 @@ template
class PointMatcherIO::PLYElement;
template
-class PointMatcherIO<float>::PLYProperty;
+struct PointMatcherIO<float>::PLYProperty;
template
-class PointMatcherIO<double>::PLYProperty;
+struct PointMatcherIO<double>::PLYProperty;
template
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
new file mode 100644
index 00000000..06aba8c9
--- /dev/null
+++ b/python/CMakeLists.txt
@@ -0,0 +1,121 @@
+if("${PYTHON_INSTALL_TARGET}" STREQUAL "")
+ message(STATUS "PYTHON_INSTALL_TARGET variable is not set, setting a default value...")
+ execute_process(COMMAND "python3" "-c" "import site; print(site.getsitepackages()[0])"
+ OUTPUT_VARIABLE PYTHON_INSTALL_TARGET OUTPUT_STRIP_TRAILING_WHITESPACE)
+endif()
+
+message(STATUS "The Python module will be installed at this location: ${PYTHON_INSTALL_TARGET}")
+
+set(PYBIND11_SOURCES
+ #pointmatcher module
+ pointmatcher/data_points.cpp
+ pointmatcher/data_points_filter.cpp
+ pointmatcher/data_points_filters.cpp
+ pointmatcher/error_minimizer.cpp
+ pointmatcher/icp.cpp
+ pointmatcher/icp_chain_base.cpp
+ pointmatcher/icp_sequence.cpp
+ pointmatcher/impls/inspectors_impl.cpp
+ pointmatcher/impls/matchers_impl.cpp
+ pointmatcher/impls/outlier_filters_impl.cpp
+ pointmatcher/impls/transformations_impl.cpp
+ pointmatcher/impls/transformation_checkers_impl.cpp
+ pointmatcher/impl.cpp
+ pointmatcher/inspector.cpp
+ pointmatcher/io.cpp
+ pointmatcher/matcher.cpp
+ pointmatcher/matches.cpp
+ pointmatcher/outlier_filter.cpp
+ pointmatcher/outlier_filters.cpp
+ pointmatcher/point_matcher.cpp
+ pointmatcher/transformation.cpp
+ pointmatcher/transformations.cpp
+ pointmatcher/transformation_checker.cpp
+ pointmatcher/transformation_checkers.cpp
+
+ #pointmatchersupport module
+ pointmatchersupport/bibliography.cpp
+ pointmatchersupport/logger.cpp
+ pointmatchersupport/logger_impl.cpp
+ pointmatchersupport/parametrizable.cpp
+ pointmatchersupport/registrars/data_points_filter_registrar.cpp
+ pointmatchersupport/registrars/error_minimizer_registrar.cpp
+ pointmatchersupport/registrars/inspector_registrar.cpp
+ pointmatchersupport/registrars/logger_registrar.cpp
+ pointmatchersupport/registrars/matcher_registrar.cpp
+ pointmatchersupport/registrars/outlier_filter_registrar.cpp
+ pointmatchersupport/registrars/transformation_registrar.cpp
+ pointmatchersupport/registrars/transformation_checker_registrar.cpp
+ pointmatchersupport/registrar.cpp
+
+ #errorminimizers module
+ errorminimizers/identity.cpp
+ errorminimizers/point_to_plane.cpp
+ errorminimizers/point_to_plane_with_cov.cpp
+ errorminimizers/point_to_point.cpp
+ errorminimizers/point_to_point_similarity.cpp
+ errorminimizers/point_to_point_with_cov.cpp
+
+ #datapointfilters module
+ datapointsfilters/bounding_box.cpp
+ datapointsfilters/covariance_sampling.cpp
+ datapointsfilters/cut_at_descriptor_threshold.cpp
+ datapointsfilters/distance_limit.cpp
+ datapointsfilters/ellipsoids.cpp
+ datapointsfilters/fix_step_sampling.cpp
+ datapointsfilters/gestalt.cpp
+ datapointsfilters/identity.cpp
+ datapointsfilters/incidence_angle.cpp
+ datapointsfilters/max_density.cpp
+ datapointsfilters/max_pointcount.cpp
+ datapointsfilters/max_quantile_on_axis.cpp
+ datapointsfilters/normal_space.cpp
+ datapointsfilters/observation_direction.cpp
+ datapointsfilters/octree_grid.cpp
+ datapointsfilters/orient_normals.cpp
+ datapointsfilters/random_sampling.cpp
+ datapointsfilters/remove_nan.cpp
+ datapointsfilters/remove_sensor_bias.cpp
+ datapointsfilters/sampling_surface_normal.cpp
+ datapointsfilters/shadow.cpp
+ datapointsfilters/simple_sensor_noise.cpp
+ datapointsfilters/sphericality.cpp
+ datapointsfilters/surface_normal.cpp
+
+ modules/point_matcher_module.cpp
+ modules/point_matcher_support_module.cpp
+ modules/data_points_filters_module.cpp
+ modules/error_minimizers_module.cpp
+
+ # main module
+ pypoint_matcher.cpp)
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/pointmatcher
+ ${CMAKE_SOURCE_DIR}/pointmatcher/DataPointsFilters
+ ${CMAKE_SOURCE_DIR}/pointmatcher/DataPointsFilters/utils
+ ${CMAKE_SOURCE_DIR}/pointmatcher/ErrorMinimizers)
+
+if(USE_SYSTEM_PYBIND11)
+ find_package(pybind11 2.5.0 REQUIRED)
+ message(STATUS "pybind11 v${pybind11_VERSION}")
+else()
+ add_subdirectory(pybind11)
+ set(pybind11_FOUND TRUE)
+endif()
+
+if(pybind11_FOUND)
+ pybind11_add_module(pypointmatcher ${PYBIND11_SOURCES})
+
+ target_link_libraries(pypointmatcher
+ PUBLIC
+ pointmatcher)
+
+ add_dependencies(pypointmatcher pointmatcher)
+
+ install(TARGETS pypointmatcher LIBRARY DESTINATION ${PYTHON_INSTALL_TARGET})
+else()
+ message(FATAL_ERROR "pybind11 is required! Please follow the \"Compiling \
+libpointmatcher with Python\" instructions from the official libpointmatcher documentation.")
+endif()
+
diff --git a/python/datapointsfilters/bounding_box.cpp b/python/datapointsfilters/bounding_box.cpp
new file mode 100644
index 00000000..bd7da04e
--- /dev/null
+++ b/python/datapointsfilters/bounding_box.cpp
@@ -0,0 +1,30 @@
+#include "bounding_box.h"
+
+#include "DataPointsFilters/BoundingBox.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindBoundingBox(py::module& p_module)
+ {
+ using BoundingBoxDataPointsFilter = BoundingBoxDataPointsFilter<ScalarType>;
+ py::class_<BoundingBoxDataPointsFilter, std::shared_ptr<BoundingBoxDataPointsFilter>, DataPointsFilter>(p_module, "BoundingBoxDataPointsFilter")
+ .def_static("description", &BoundingBoxDataPointsFilter::description)
+ .def_static("availableParameters", &BoundingBoxDataPointsFilter::availableParameters)
+
+ .def_readonly("xMin", &BoundingBoxDataPointsFilter::xMin)
+ .def_readonly("xMax", &BoundingBoxDataPointsFilter::xMax)
+ .def_readonly("yMin", &BoundingBoxDataPointsFilter::yMin)
+ .def_readonly("yMax", &BoundingBoxDataPointsFilter::yMax)
+ .def_readonly("zMin", &BoundingBoxDataPointsFilter::zMin)
+ .def_readonly("zMax", &BoundingBoxDataPointsFilter::zMax)
+ .def_readonly("removeInside", &BoundingBoxDataPointsFilter::removeInside)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+ .def("filter", &BoundingBoxDataPointsFilter::filter, py::arg("input"))
+ .def("inPlaceFilter", &BoundingBoxDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+ }
+ }
+}
diff --git a/python/datapointsfilters/bounding_box.h b/python/datapointsfilters/bounding_box.h
new file mode 100644
index 00000000..1f67813e
--- /dev/null
+++ b/python/datapointsfilters/bounding_box.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_BOUNDING_BOX_H
+#define PYTHON_DATAPOINTSFILTERS_BOUNDING_BOX_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindBoundingBox(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_BOUNDING_BOX_H
diff --git a/python/datapointsfilters/covariance_sampling.cpp b/python/datapointsfilters/covariance_sampling.cpp
new file mode 100644
index 00000000..9c2c7494
--- /dev/null
+++ b/python/datapointsfilters/covariance_sampling.cpp
@@ -0,0 +1,31 @@
+#include "covariance_sampling.h"
+
+#include "DataPointsFilters/CovarianceSampling.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindCovarianceSampling(py::module& p_module)
+ {
+ using CovarianceSamplingDataPointsFilter = CovarianceSamplingDataPointsFilter<ScalarType>;
+ py::class_<CovarianceSamplingDataPointsFilter, std::shared_ptr<CovarianceSamplingDataPointsFilter>, DataPointsFilter> cosamplingClass(p_module, "CovarianceSamplingDataPointsFilter");
+
+ using TorqueNormMethod = CovarianceSamplingDataPointsFilter::TorqueNormMethod;
+ py::enum_<TorqueNormMethod>(cosamplingClass, "TorqueNormMethod").value("L1", TorqueNormMethod::L1)
+ .value("Lavg", TorqueNormMethod::Lavg).value("Lmax", TorqueNormMethod::Lmax).export_values();
+
+ cosamplingClass.def_static("description", &CovarianceSamplingDataPointsFilter::description)
+ .def_static("availableParameters", &CovarianceSamplingDataPointsFilter::availableParameters)
+ .def_static("computeConditionNumber", &CovarianceSamplingDataPointsFilter::computeConditionNumber, py::arg("cov"))
+
+ .def_readwrite("nbSample", &CovarianceSamplingDataPointsFilter::nbSample)
+ .def_readonly("normalizationMethod", &CovarianceSamplingDataPointsFilter::normalizationMethod)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+ .def("filter", &CovarianceSamplingDataPointsFilter::filter, py::arg("input"))
+ .def("inPlaceFilter", &CovarianceSamplingDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+ }
+ }
+}
diff --git a/python/datapointsfilters/covariance_sampling.h b/python/datapointsfilters/covariance_sampling.h
new file mode 100644
index 00000000..0927d61e
--- /dev/null
+++ b/python/datapointsfilters/covariance_sampling.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_COVARIANCE_SAMPLING_H
+#define PYTHON_DATAPOINTSFILTERS_COVARIANCE_SAMPLING_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindCovarianceSampling(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_COVARIANCE_SAMPLING_H
diff --git a/python/datapointsfilters/cut_at_descriptor_threshold.cpp b/python/datapointsfilters/cut_at_descriptor_threshold.cpp
new file mode 100644
index 00000000..34a537a9
--- /dev/null
+++ b/python/datapointsfilters/cut_at_descriptor_threshold.cpp
@@ -0,0 +1,26 @@
+#include "cut_at_descriptor_threshold.h"
+
+#include "DataPointsFilters/CutAtDescriptorThreshold.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindCutAtDescriptorThreshold(py::module& p_module)
+ {
+ using CutAtDescriptorThresholdDataPointsFilter = CutAtDescriptorThresholdDataPointsFilter<ScalarType>;
+ py::class_<CutAtDescriptorThresholdDataPointsFilter, std::shared_ptr<CutAtDescriptorThresholdDataPointsFilter>, DataPointsFilter>(p_module, "CutAtDescriptorThresholdDataPointsFilter")
+ .def_static("description", &CutAtDescriptorThresholdDataPointsFilter::description)
+ .def_static("availableParameters", &CutAtDescriptorThresholdDataPointsFilter::availableParameters)
+
+ .def_readonly("descName", &CutAtDescriptorThresholdDataPointsFilter::descName)
+ .def_readonly("useLargerThan", &CutAtDescriptorThresholdDataPointsFilter::useLargerThan)
+ .def_readonly("threshold", &CutAtDescriptorThresholdDataPointsFilter::threshold)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+ .def("filter", &CutAtDescriptorThresholdDataPointsFilter::filter, py::arg("input"))
+ .def("inPlaceFilter", &CutAtDescriptorThresholdDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+ }
+ }
+}
diff --git a/python/datapointsfilters/cut_at_descriptor_threshold.h b/python/datapointsfilters/cut_at_descriptor_threshold.h
new file mode 100644
index 00000000..152df095
--- /dev/null
+++ b/python/datapointsfilters/cut_at_descriptor_threshold.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_CUT_AT_DESCRIPTOR_THRESHOLD_H
+#define PYTHON_DATAPOINTSFILTERS_CUT_AT_DESCRIPTOR_THRESHOLD_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindCutAtDescriptorThreshold(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_CUT_AT_DESCRIPTOR_THRESHOLD_H
diff --git a/python/datapointsfilters/distance_limit.cpp b/python/datapointsfilters/distance_limit.cpp
new file mode 100644
index 00000000..79153f6f
--- /dev/null
+++ b/python/datapointsfilters/distance_limit.cpp
@@ -0,0 +1,26 @@
+#include "distance_limit.h"
+
+#include "DataPointsFilters/DistanceLimit.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindDistanceLimit(py::module& p_module)
+ {
+ using DistanceLimitDataPointsFilter = DistanceLimitDataPointsFilter<ScalarType>;
+ py::class_<DistanceLimitDataPointsFilter, std::shared_ptr<DistanceLimitDataPointsFilter>, DataPointsFilter>(p_module, "DistanceLimitDataPointsFilter")
+ .def_static("description", &DistanceLimitDataPointsFilter::description)
+ .def_static("availableParameters", &DistanceLimitDataPointsFilter::availableParameters)
+
+ .def_readonly("dim", &DistanceLimitDataPointsFilter::dim)
+ .def_readonly("dist", &DistanceLimitDataPointsFilter::dist)
+ .def_readonly("removeInside", &DistanceLimitDataPointsFilter::removeInside)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+ .def("filter", &DistanceLimitDataPointsFilter::filter, py::arg("input"))
+ .def("inPlaceFilter", &DistanceLimitDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+ }
+ }
+}
diff --git a/python/datapointsfilters/distance_limit.h b/python/datapointsfilters/distance_limit.h
new file mode 100644
index 00000000..d166e345
--- /dev/null
+++ b/python/datapointsfilters/distance_limit.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_DISTANCE_LIMIT_H
+#define PYTHON_DATAPOINTSFILTERS_DISTANCE_LIMIT_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindDistanceLimit(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_DISTANCE_LIMIT_H
diff --git a/python/datapointsfilters/ellipsoids.cpp b/python/datapointsfilters/ellipsoids.cpp
new file mode 100644
index 00000000..9c81adbb
--- /dev/null
+++ b/python/datapointsfilters/ellipsoids.cpp
@@ -0,0 +1,40 @@
+#include "ellipsoids.h"
+
+#include "DataPointsFilters/Elipsoids.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindEllipsoids(py::module& p_module)
+        {
+            using ElipsoidsDataPointsFilter = ElipsoidsDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<ElipsoidsDataPointsFilter, std::shared_ptr<ElipsoidsDataPointsFilter>, DataPointsFilter> ellipsoidClass(p_module, "EllipsoidsDataPointsFilter");
+
+            ellipsoidClass.def_static("description", &ElipsoidsDataPointsFilter::description)
+                .def_static("availableParameters", &ElipsoidsDataPointsFilter::availableParameters)
+
+                .def_readonly("ratio", &ElipsoidsDataPointsFilter::ratio)
+                .def_readonly("knn", &ElipsoidsDataPointsFilter::knn)
+                .def_readonly("samplingMethod", &ElipsoidsDataPointsFilter::samplingMethod)
+                .def_readonly("maxBoxDim", &ElipsoidsDataPointsFilter::maxBoxDim)
+                .def_readonly("maxTimeWindow", &ElipsoidsDataPointsFilter::maxTimeWindow)
+                .def_readonly("minPlanarity", &ElipsoidsDataPointsFilter::minPlanarity)
+                .def_readonly("averageExistingDescriptors", &ElipsoidsDataPointsFilter::averageExistingDescriptors)
+                .def_readonly("keepNormals", &ElipsoidsDataPointsFilter::keepNormals)
+                .def_readonly("keepDensities", &ElipsoidsDataPointsFilter::keepDensities)
+                .def_readonly("keepEigenValues", &ElipsoidsDataPointsFilter::keepEigenValues)
+                .def_readonly("keepEigenVectors", &ElipsoidsDataPointsFilter::keepEigenVectors)
+                .def_readonly("keepCovariances", &ElipsoidsDataPointsFilter::keepCovariances)
+                .def_readonly("keepWeights", &ElipsoidsDataPointsFilter::keepWeights)
+                .def_readonly("keepMeans", &ElipsoidsDataPointsFilter::keepMeans)
+                .def_readonly("keepShapes", &ElipsoidsDataPointsFilter::keepShapes)
+                .def_readonly("keepIndices", &ElipsoidsDataPointsFilter::keepIndices)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &ElipsoidsDataPointsFilter::filter, py::arg("input"))
+                .def("inPlaceFilter", &ElipsoidsDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+        }
+    }
+}
diff --git a/python/datapointsfilters/ellipsoids.h b/python/datapointsfilters/ellipsoids.h
new file mode 100644
index 00000000..10b39be7
--- /dev/null
+++ b/python/datapointsfilters/ellipsoids.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_ELLIPSOIDS_H
+#define PYTHON_DATAPOINTSFILTERS_ELLIPSOIDS_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindEllipsoids(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_ELLIPSOIDS_H
diff --git a/python/datapointsfilters/fix_step_sampling.cpp b/python/datapointsfilters/fix_step_sampling.cpp
new file mode 100644
index 00000000..b380865a
--- /dev/null
+++ b/python/datapointsfilters/fix_step_sampling.cpp
@@ -0,0 +1,27 @@
+#include "fix_step_sampling.h"
+
+#include "DataPointsFilters/FixStepSampling.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindFixStepSampling(py::module& p_module)
+        {
+            using FixStepSamplingDataPointsFilter = FixStepSamplingDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<FixStepSamplingDataPointsFilter, std::shared_ptr<FixStepSamplingDataPointsFilter>, DataPointsFilter>(p_module, "FixStepSamplingDataPointsFilter")
+                .def_static("description", &FixStepSamplingDataPointsFilter::description)
+                .def_static("availableParameters", &FixStepSamplingDataPointsFilter::availableParameters)
+
+                .def_readonly("startStep", &FixStepSamplingDataPointsFilter::startStep)
+                .def_readonly("endStep", &FixStepSamplingDataPointsFilter::endStep)
+                .def_readonly("stepMult", &FixStepSamplingDataPointsFilter::stepMult)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("init", &FixStepSamplingDataPointsFilter::init)
+                .def("filter", &FixStepSamplingDataPointsFilter::filter, py::arg("input"))
+                .def("inPlaceFilter", &FixStepSamplingDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+        }
+    }
+}
diff --git a/python/datapointsfilters/fix_step_sampling.h b/python/datapointsfilters/fix_step_sampling.h
new file mode 100644
index 00000000..478632f5
--- /dev/null
+++ b/python/datapointsfilters/fix_step_sampling.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_FIX_STEP_SAMPLING_H
+#define PYTHON_DATAPOINTSFILTERS_FIX_STEP_SAMPLING_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindFixStepSampling(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_FIX_STEP_SAMPLING_H
diff --git a/python/datapointsfilters/gestalt.cpp b/python/datapointsfilters/gestalt.cpp
new file mode 100644
index 00000000..9cad667f
--- /dev/null
+++ b/python/datapointsfilters/gestalt.cpp
@@ -0,0 +1,41 @@
+#include "gestalt.h"
+
+#include "DataPointsFilters/Gestalt.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindGestalt(py::module& p_module)
+        {
+            using GestaltDataPointsFilter = GestaltDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<GestaltDataPointsFilter, std::shared_ptr<GestaltDataPointsFilter>, DataPointsFilter>(p_module, "GestaltDataPointsFilter")
+                .def_static("description", &GestaltDataPointsFilter::description)
+                .def_static("availableParameters", &GestaltDataPointsFilter::availableParameters)
+
+                .def_readonly("ratio", &GestaltDataPointsFilter::ratio)
+                .def_readonly("radius", &GestaltDataPointsFilter::radius)
+                .def_readonly("knn", &GestaltDataPointsFilter::knn)
+                .def_readonly("vSizeX", &GestaltDataPointsFilter::vSizeX)
+                .def_readonly("vSizeY", &GestaltDataPointsFilter::vSizeY)
+                .def_readonly("vSizeZ", &GestaltDataPointsFilter::vSizeZ)
+                .def_readonly("maxBoxDim", &GestaltDataPointsFilter::maxBoxDim)
+                .def_readonly("maxTimeWindow", &GestaltDataPointsFilter::maxTimeWindow)
+                .def_readonly("keepMeans", &GestaltDataPointsFilter::keepMeans)
+                .def_readonly("averageExistingDescriptors", &GestaltDataPointsFilter::averageExistingDescriptors)
+                .def_readonly("keepNormals", &GestaltDataPointsFilter::keepNormals)
+                .def_readonly("keepEigenValues", &GestaltDataPointsFilter::keepEigenValues)
+                .def_readonly("keepEigenVectors", &GestaltDataPointsFilter::keepEigenVectors)
+                .def_readonly("keepCovariances", &GestaltDataPointsFilter::keepCovariances)
+                .def_readonly("keepGestaltFeatures", &GestaltDataPointsFilter::keepGestaltFeatures)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &GestaltDataPointsFilter::filter, py::arg("input"))
+                .def("inPlaceFilter", &GestaltDataPointsFilter::inPlaceFilter, py::arg("cloud"))
+                .def("serializeGestaltMatrix", &GestaltDataPointsFilter::serializeGestaltMatrix, py::arg("gestaltFeatures"))
+                .def("calculateAngles", &GestaltDataPointsFilter::calculateAngles, py::arg("points"), py::arg("keyPoint"))
+                .def("calculateRadii", &GestaltDataPointsFilter::calculateRadii, py::arg("points"), py::arg("keyPoint"));
+        }
+    }
+}
diff --git a/python/datapointsfilters/gestalt.h b/python/datapointsfilters/gestalt.h
new file mode 100644
index 00000000..db54c0a8
--- /dev/null
+++ b/python/datapointsfilters/gestalt.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_GESTALT_H
+#define PYTHON_DATAPOINTSFILTERS_GESTALT_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindGestalt(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_GESTALT_H
diff --git a/python/datapointsfilters/identity.cpp b/python/datapointsfilters/identity.cpp
new file mode 100644
index 00000000..f7d3f210
--- /dev/null
+++ b/python/datapointsfilters/identity.cpp
@@ -0,0 +1,21 @@
+#include "identity.h"
+
+#include "DataPointsFilters/Identity.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindIdentity(py::module& p_module)
+        {
+            using IdentityDataPointsFilter = IdentityDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<IdentityDataPointsFilter, std::shared_ptr<IdentityDataPointsFilter>, DataPointsFilter>(p_module, "IdentityDataPointsFilter")
+                .def_static("description", &IdentityDataPointsFilter::description)
+
+                .def(py::init<>())
+
+                .def("filter", &IdentityDataPointsFilter::filter, py::arg("input"))
+                .def("inPlaceFilter", &IdentityDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+        }
+    }
+}
diff --git a/python/datapointsfilters/identity.h b/python/datapointsfilters/identity.h
new file mode 100644
index 00000000..fe8c900c
--- /dev/null
+++ b/python/datapointsfilters/identity.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_IDENTITY_H
+#define PYTHON_DATAPOINTSFILTERS_IDENTITY_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindIdentity(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_IDENTITY_H
diff --git a/python/datapointsfilters/incidence_angle.cpp b/python/datapointsfilters/incidence_angle.cpp
new file mode 100644
index 00000000..193fe733
--- /dev/null
+++ b/python/datapointsfilters/incidence_angle.cpp
@@ -0,0 +1,21 @@
+#include "incidence_angle.h"
+
+#include "DataPointsFilters/IncidenceAngle.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindIncidenceAngle(py::module& p_module)
+        {
+            using IncidenceAngleDataPointsFilter = IncidenceAngleDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<IncidenceAngleDataPointsFilter, std::shared_ptr<IncidenceAngleDataPointsFilter>, DataPointsFilter>(p_module, "IncidenceAngleDataPointsFilter")
+                .def_static("description", &IncidenceAngleDataPointsFilter::description)
+
+                .def(py::init<>())
+
+                .def("filter", &IncidenceAngleDataPointsFilter::filter, py::arg("input"))
+                .def("inPlaceFilter", &IncidenceAngleDataPointsFilter::inPlaceFilter, py::arg("cloud"));
+        }
+    }
+}
diff --git a/python/datapointsfilters/incidence_angle.h b/python/datapointsfilters/incidence_angle.h
new file mode 100644
index 00000000..64769d94
--- /dev/null
+++ b/python/datapointsfilters/incidence_angle.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_INCIDENCE_ANGLE_H
+#define PYTHON_DATAPOINTSFILTERS_INCIDENCE_ANGLE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindIncidenceAngle(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_INCIDENCE_ANGLE_H
diff --git a/python/datapointsfilters/max_density.cpp b/python/datapointsfilters/max_density.cpp
new file mode 100644
index 00000000..8169a02a
--- /dev/null
+++ b/python/datapointsfilters/max_density.cpp
@@ -0,0 +1,24 @@
+#include "max_density.h"
+
+#include "DataPointsFilters/MaxDensity.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindMaxDensity(py::module& p_module)
+        {
+            using MaxDensityDataPointsFilter = MaxDensityDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<MaxDensityDataPointsFilter, std::shared_ptr<MaxDensityDataPointsFilter>, DataPointsFilter>(p_module, "MaxDensityDataPointsFilter")
+                .def_static("description", &MaxDensityDataPointsFilter::description)
+                .def_static("availableParameters", &MaxDensityDataPointsFilter::availableParameters)
+
+                .def_readonly("maxDensity", &MaxDensityDataPointsFilter::maxDensity)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &MaxDensityDataPointsFilter::filter)
+                .def("inPlaceFilter", &MaxDensityDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/max_density.h b/python/datapointsfilters/max_density.h
new file mode 100644
index 00000000..a3a3d981
--- /dev/null
+++ b/python/datapointsfilters/max_density.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_MAX_DENSITY_H
+#define PYTHON_DATAPOINTSFILTERS_MAX_DENSITY_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindMaxDensity(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_MAX_DENSITY_H
diff --git a/python/datapointsfilters/max_pointcount.cpp b/python/datapointsfilters/max_pointcount.cpp
new file mode 100644
index 00000000..bc3497af
--- /dev/null
+++ b/python/datapointsfilters/max_pointcount.cpp
@@ -0,0 +1,25 @@
+#include "max_pointcount.h"
+
+#include "DataPointsFilters/MaxPointCount.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindMaxPointCount(py::module& p_module)
+        {
+            using MaxPointCountDataPointsFilter = MaxPointCountDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<MaxPointCountDataPointsFilter, std::shared_ptr<MaxPointCountDataPointsFilter>, DataPointsFilter>(p_module, "MaxPointCountDataPointsFilter")
+                .def_static("description", &MaxPointCountDataPointsFilter::description)
+                .def_static("availableParameters", &MaxPointCountDataPointsFilter::availableParameters)
+
+                .def_readonly("maxCount", &MaxPointCountDataPointsFilter::maxCount)
+                .def_readonly("seed", &MaxPointCountDataPointsFilter::seed)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &MaxPointCountDataPointsFilter::filter)
+                .def("inPlaceFilter", &MaxPointCountDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/max_pointcount.h b/python/datapointsfilters/max_pointcount.h
new file mode 100644
index 00000000..edfd0c47
--- /dev/null
+++ b/python/datapointsfilters/max_pointcount.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_MAX_POINTCOUNT_H
+#define PYTHON_DATAPOINTSFILTERS_MAX_POINTCOUNT_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindMaxPointCount(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_MAX_POINTCOUNT_H
diff --git a/python/datapointsfilters/max_quantile_on_axis.cpp b/python/datapointsfilters/max_quantile_on_axis.cpp
new file mode 100644
index 00000000..299389bb
--- /dev/null
+++ b/python/datapointsfilters/max_quantile_on_axis.cpp
@@ -0,0 +1,25 @@
+#include "max_quantile_on_axis.h"
+
+#include "DataPointsFilters/MaxQuantileOnAxis.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindMaxQuantileOnAxis(py::module& p_module)
+        {
+            using MaxQuantileOnAxisDataPointsFilter = MaxQuantileOnAxisDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<MaxQuantileOnAxisDataPointsFilter, std::shared_ptr<MaxQuantileOnAxisDataPointsFilter>, DataPointsFilter>(p_module, "MaxQuantileOnAxisDataPointsFilter")
+                .def_static("description", &MaxQuantileOnAxisDataPointsFilter::description)
+                .def_static("availableParameters", &MaxQuantileOnAxisDataPointsFilter::availableParameters)
+
+                .def_readonly("dim", &MaxQuantileOnAxisDataPointsFilter::dim)
+                .def_readonly("ratio", &MaxQuantileOnAxisDataPointsFilter::ratio)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &MaxQuantileOnAxisDataPointsFilter::filter)
+                .def("inPlaceFilter", &MaxQuantileOnAxisDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/max_quantile_on_axis.h b/python/datapointsfilters/max_quantile_on_axis.h
new file mode 100644
index 00000000..8f7c2b03
--- /dev/null
+++ b/python/datapointsfilters/max_quantile_on_axis.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_MAX_QUANTILE_ON_AXIS_H
+#define PYTHON_DATAPOINTSFILTERS_MAX_QUANTILE_ON_AXIS_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindMaxQuantileOnAxis(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_MAX_QUANTILE_ON_AXIS_H
diff --git a/python/datapointsfilters/normal_space.cpp b/python/datapointsfilters/normal_space.cpp
new file mode 100644
index 00000000..40e346b4
--- /dev/null
+++ b/python/datapointsfilters/normal_space.cpp
@@ -0,0 +1,26 @@
+#include "normal_space.h"
+
+#include "DataPointsFilters/NormalSpace.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindNormalSpace(py::module& p_module)
+        {
+            using NormalSpaceDataPointsFilter = NormalSpaceDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<NormalSpaceDataPointsFilter, std::shared_ptr<NormalSpaceDataPointsFilter>, DataPointsFilter>(p_module, "NormalSpaceDataPointsFilter")
+                .def_static("description", &NormalSpaceDataPointsFilter::description)
+                .def_static("availableParameters", &NormalSpaceDataPointsFilter::availableParameters)
+
+                .def_readonly("nbSample", &NormalSpaceDataPointsFilter::nbSample)
+                .def_readonly("seed", &NormalSpaceDataPointsFilter::seed)
+                .def_readonly("epsilon", &NormalSpaceDataPointsFilter::epsilon)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &NormalSpaceDataPointsFilter::filter)
+                .def("inPlaceFilter", &NormalSpaceDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/normal_space.h b/python/datapointsfilters/normal_space.h
new file mode 100644
index 00000000..7a8775ea
--- /dev/null
+++ b/python/datapointsfilters/normal_space.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_NORMAL_SPACE_H
+#define PYTHON_DATAPOINTSFILTERS_NORMAL_SPACE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindNormalSpace(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_NORMAL_SPACE_H
diff --git a/python/datapointsfilters/observation_direction.cpp b/python/datapointsfilters/observation_direction.cpp
new file mode 100644
index 00000000..c927d701
--- /dev/null
+++ b/python/datapointsfilters/observation_direction.cpp
@@ -0,0 +1,26 @@
+#include "observation_direction.h"
+
+#include "DataPointsFilters/ObservationDirection.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindObservationDirection(py::module& p_module)
+        {
+            using ObservationDirectionDataPointsFilter = ObservationDirectionDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<ObservationDirectionDataPointsFilter, std::shared_ptr<ObservationDirectionDataPointsFilter>, DataPointsFilter>(p_module, "ObservationDirectionDataPointsFilter")
+                .def_static("description", &ObservationDirectionDataPointsFilter::description)
+                .def_static("availableParameters", &ObservationDirectionDataPointsFilter::availableParameters)
+
+                .def_readonly("centerX", &ObservationDirectionDataPointsFilter::centerX)
+                .def_readonly("centerY", &ObservationDirectionDataPointsFilter::centerY)
+                .def_readonly("centerZ", &ObservationDirectionDataPointsFilter::centerZ)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &ObservationDirectionDataPointsFilter::filter)
+                .def("inPlaceFilter", &ObservationDirectionDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/observation_direction.h b/python/datapointsfilters/observation_direction.h
new file mode 100644
index 00000000..e3beac5a
--- /dev/null
+++ b/python/datapointsfilters/observation_direction.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_OBSERVATION_DIRECTION_H
+#define PYTHON_DATAPOINTSFILTERS_OBSERVATION_DIRECTION_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindObservationDirection(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_OBSERVATION_DIRECTION_H
diff --git a/python/datapointsfilters/octree_grid.cpp b/python/datapointsfilters/octree_grid.cpp
new file mode 100644
index 00000000..e5537345
--- /dev/null
+++ b/python/datapointsfilters/octree_grid.cpp
@@ -0,0 +1,78 @@
+#include "octree_grid.h"
+
+#include "DataPointsFilters/OctreeGrid.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindOctreeGrid(py::module& p_module)
+        {
+            using OctreeGridDataPointsFilter = OctreeGridDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<OctreeGridDataPointsFilter, std::shared_ptr<OctreeGridDataPointsFilter>, DataPointsFilter> octreegridClass(p_module, "OctreeGridDataPointsFilter");
+
+            octreegridClass.doc() = R"pbdoc(
+Data Filter based on Octree representation
+
+Processing is applied via a Visitor through depth-first search (DFS) in the Octree,
+i.e. for each node, the Visitor/Callback is called
+)pbdoc";
+
+            using FirstPtsSampler = OctreeGridDataPointsFilter::FirstPtsSampler;
+            py::class_<FirstPtsSampler> firstptssamplerClass(octreegridClass, "FirstPtsSampler", "Visitors class to apply processing");
+
+            firstptssamplerClass.def_readwrite("idx", &FirstPtsSampler::idx)
+//                .def_readwrite("pts", &FirstPtsSampler::pts, py::return_value_policy::reference) FIXME
+                .def_readwrite("mapidx", &FirstPtsSampler::mapidx, "Build map of (old index to new index), in case we sample pts at the beginning of the pointcloud")
+
+                .def(py::init<DataPoints&>(), py::arg("dp")) // NOTE(review): ctor arg type inferred from py::arg — confirm against FirstPtsSampler declaration
+
+//                .def("__call__", &FirstPtsSampler::operator()<2>, py::arg("oc")) FIXME
+//                .def("__call__", &FirstPtsSampler::operator()<3>, py::arg("oc")) FIXME
+                .def("finalize", &FirstPtsSampler::finalize);
+
+            using RandomPtsSampler = OctreeGridDataPointsFilter::RandomPtsSampler;
+            py::class_<RandomPtsSampler, FirstPtsSampler>(firstptssamplerClass, "RandomPtsSampler")
+                .def_readonly("seed", &RandomPtsSampler::seed)
+
+                .def(py::init<DataPoints&>(), py::arg("dp"))
+                .def(py::init<DataPoints&, const std::size_t>(), py::arg("dp"), py::arg("seed_")) // NOTE(review): seed type inferred — confirm against RandomPtsSampler declaration
+
+//                .def("__call__", &RandomPtsSampler::operator()<2>, py::arg("oc")) FIXME
+//                .def("__call__", &RandomPtsSampler::operator()<3>, py::arg("oc")) FIXME
+                .def("finalize", &RandomPtsSampler::finalize);
+
+            using CentroidSampler = OctreeGridDataPointsFilter::CentroidSampler;
+            py::class_<CentroidSampler, FirstPtsSampler>(firstptssamplerClass, "CentroidSampler")
+                .def(py::init<DataPoints&>(), py::arg("dp"));
+
+//            .def("__call__", &CentroidSampler::operator()<2>, py::arg("oc")) FIXME
+//            .def("__call__", &CentroidSampler::operator()<3>, py::arg("oc")); FIXME
+
+            using MedoidSampler = OctreeGridDataPointsFilter::MedoidSampler;
+            py::class_<MedoidSampler, FirstPtsSampler>(firstptssamplerClass, "MedoidSampler", "Nearest point from the centroid (contained in the cloud)")
+                .def(py::init<DataPoints&>(), py::arg("dp"));
+
+//            .def("__call__", &MedoidSampler::operator()<2>, py::arg("oc")) FIXME
+//            .def("__call__", &MedoidSampler::operator()<3>, py::arg("oc")); FIXME
+
+            using SamplingMethod = OctreeGridDataPointsFilter::SamplingMethod;
+            py::enum_<SamplingMethod>(octreegridClass, "SamplingMethod").value("FIRST_PTS", SamplingMethod::FIRST_PTS)
+                .value("RAND_PTS", SamplingMethod::RAND_PTS).value("CENTROID", SamplingMethod::CENTROID)
+                .value("MEDOID", SamplingMethod::MEDOID).export_values();
+
+            octreegridClass.def_static("description", &OctreeGridDataPointsFilter::description)
+                .def_static("availableParameters", &OctreeGridDataPointsFilter::availableParameters)
+
+                .def_readwrite("buildParallel", &OctreeGridDataPointsFilter::buildParallel)
+                .def_readwrite("maxPointByNode", &OctreeGridDataPointsFilter::maxPointByNode)
+                .def_readwrite("maxSizeByNode", &OctreeGridDataPointsFilter::maxSizeByNode)
+                .def_readonly("samplingMethod", &OctreeGridDataPointsFilter::samplingMethod)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &OctreeGridDataPointsFilter::filter)
+                .def("inPlaceFilter", &OctreeGridDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/octree_grid.h b/python/datapointsfilters/octree_grid.h
new file mode 100644
index 00000000..3092c663
--- /dev/null
+++ b/python/datapointsfilters/octree_grid.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_OCTREE_GRID_H
+#define PYTHON_DATAPOINTSFILTERS_OCTREE_GRID_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindOctreeGrid(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_OCTREE_GRID_H
diff --git a/python/datapointsfilters/orient_normals.cpp b/python/datapointsfilters/orient_normals.cpp
new file mode 100644
index 00000000..05edac79
--- /dev/null
+++ b/python/datapointsfilters/orient_normals.cpp
@@ -0,0 +1,25 @@
+#include "orient_normals.h"
+
+#include "DataPointsFilters/OrientNormals.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindOrientNormals(py::module& p_module)
+        {
+            using OrientNormalsDataPointsFilter = OrientNormalsDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<OrientNormalsDataPointsFilter, std::shared_ptr<OrientNormalsDataPointsFilter>, DataPointsFilter>(p_module, "OrientNormalsDataPointsFilter", "Reorientation of normals")
+
+                .def_static("description", &OrientNormalsDataPointsFilter::description)
+                .def_static("availableParameters", &OrientNormalsDataPointsFilter::availableParameters)
+
+                .def_readonly("towardCenter", &OrientNormalsDataPointsFilter::towardCenter)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &OrientNormalsDataPointsFilter::filter)
+                .def("inPlaceFilter", &OrientNormalsDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/orient_normals.h b/python/datapointsfilters/orient_normals.h
new file mode 100644
index 00000000..faea4e00
--- /dev/null
+++ b/python/datapointsfilters/orient_normals.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_ORIENT_NORMALS_H
+#define PYTHON_DATAPOINTSFILTERS_ORIENT_NORMALS_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindOrientNormals(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_ORIENT_NORMALS_H
diff --git a/python/datapointsfilters/random_sampling.cpp b/python/datapointsfilters/random_sampling.cpp
new file mode 100644
index 00000000..59f754da
--- /dev/null
+++ b/python/datapointsfilters/random_sampling.cpp
@@ -0,0 +1,25 @@
+#include "random_sampling.h"
+
+#include "DataPointsFilters/RandomSampling.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindRandomSampling(py::module& p_module)
+        {
+            using RandomSamplingDataPointsFilter = RandomSamplingDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<RandomSamplingDataPointsFilter, std::shared_ptr<RandomSamplingDataPointsFilter>, DataPointsFilter>(p_module, "RandomSamplingDataPointsFilter", "Random sampling")
+
+                .def_static("description", &RandomSamplingDataPointsFilter::description)
+                .def_static("availableParameters", &RandomSamplingDataPointsFilter::availableParameters)
+
+                .def_readonly("prob", &RandomSamplingDataPointsFilter::prob)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &RandomSamplingDataPointsFilter::filter)
+                .def("inPlaceFilter", &RandomSamplingDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/random_sampling.h b/python/datapointsfilters/random_sampling.h
new file mode 100644
index 00000000..10c1a4c8
--- /dev/null
+++ b/python/datapointsfilters/random_sampling.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_RANDOM_SAMPLING_H
+#define PYTHON_DATAPOINTSFILTERS_RANDOM_SAMPLING_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindRandomSampling(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_RANDOM_SAMPLING_H
diff --git a/python/datapointsfilters/remove_nan.cpp b/python/datapointsfilters/remove_nan.cpp
new file mode 100644
index 00000000..a0225a1f
--- /dev/null
+++ b/python/datapointsfilters/remove_nan.cpp
@@ -0,0 +1,22 @@
+#include "remove_nan.h"
+
+#include "DataPointsFilters/RemoveNaN.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindRemoveNaN(py::module& p_module)
+        {
+            using RemoveNaNDataPointsFilter = RemoveNaNDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<RemoveNaNDataPointsFilter, std::shared_ptr<RemoveNaNDataPointsFilter>, DataPointsFilter>(p_module, "RemoveNaNDataPointsFilter", "Remove points having NaN as coordinate")
+
+                .def_static("description", &RemoveNaNDataPointsFilter::description)
+
+                .def(py::init<>())
+
+                .def("filter", &RemoveNaNDataPointsFilter::filter)
+                .def("inPlaceFilter", &RemoveNaNDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/remove_nan.h b/python/datapointsfilters/remove_nan.h
new file mode 100644
index 00000000..ea385a19
--- /dev/null
+++ b/python/datapointsfilters/remove_nan.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_REMOVE_NAN_H
+#define PYTHON_DATAPOINTSFILTERS_REMOVE_NAN_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindRemoveNaN(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_REMOVE_NAN_H
diff --git a/python/datapointsfilters/remove_sensor_bias.cpp b/python/datapointsfilters/remove_sensor_bias.cpp
new file mode 100644
index 00000000..6794a7f6
--- /dev/null
+++ b/python/datapointsfilters/remove_sensor_bias.cpp
@@ -0,0 +1,23 @@
+#include "remove_sensor_bias.h"
+
+#include "DataPointsFilters/RemoveSensorBias.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindRemoveSensorBias(py::module& p_module)
+        {
+            using RemoveSensorBiasDataPointsFilter = RemoveSensorBiasDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<RemoveSensorBiasDataPointsFilter, std::shared_ptr<RemoveSensorBiasDataPointsFilter>, DataPointsFilter>(p_module, "RemoveSensorBiasDataPointsFilter")
+
+                .def_static("description", &RemoveSensorBiasDataPointsFilter::description)
+                .def_static("availableParameters", &RemoveSensorBiasDataPointsFilter::availableParameters)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &RemoveSensorBiasDataPointsFilter::filter)
+                .def("inPlaceFilter", &RemoveSensorBiasDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/remove_sensor_bias.h b/python/datapointsfilters/remove_sensor_bias.h
new file mode 100644
index 00000000..650c1a41
--- /dev/null
+++ b/python/datapointsfilters/remove_sensor_bias.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_REMOVE_SENSOR_BIAS_H
+#define PYTHON_DATAPOINTSFILTERS_REMOVE_SENSOR_BIAS_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindRemoveSensorBias(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_REMOVE_SENSOR_BIAS_H
diff --git a/python/datapointsfilters/sampling_surface_normal.cpp b/python/datapointsfilters/sampling_surface_normal.cpp
new file mode 100644
index 00000000..ef70e950
--- /dev/null
+++ b/python/datapointsfilters/sampling_surface_normal.cpp
@@ -0,0 +1,33 @@
+#include "sampling_surface_normal.h"
+
+#include "DataPointsFilters/SamplingSurfaceNormal.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindSamplingSurfaceNormal(py::module& p_module)
+        {
+            using SamplingSurfaceNormalDataPointsFilter = SamplingSurfaceNormalDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<SamplingSurfaceNormalDataPointsFilter, std::shared_ptr<SamplingSurfaceNormalDataPointsFilter>, DataPointsFilter>(p_module, "SamplingSurfaceNormalDataPointsFilter")
+
+                .def_static("description", &SamplingSurfaceNormalDataPointsFilter::description)
+                .def_static("availableParameters", &SamplingSurfaceNormalDataPointsFilter::availableParameters)
+
+                .def_readonly("ratio", &SamplingSurfaceNormalDataPointsFilter::ratio)
+                .def_readonly("knn", &SamplingSurfaceNormalDataPointsFilter::knn)
+                .def_readonly("samplingMethod", &SamplingSurfaceNormalDataPointsFilter::samplingMethod)
+                .def_readonly("maxBoxDim", &SamplingSurfaceNormalDataPointsFilter::maxBoxDim)
+                .def_readonly("averageExistingDescriptors", &SamplingSurfaceNormalDataPointsFilter::averageExistingDescriptors)
+                .def_readonly("keepNormals", &SamplingSurfaceNormalDataPointsFilter::keepNormals)
+                .def_readonly("keepDensities", &SamplingSurfaceNormalDataPointsFilter::keepDensities)
+                .def_readonly("keepEigenValues", &SamplingSurfaceNormalDataPointsFilter::keepEigenValues)
+                .def_readonly("keepEigenVectors", &SamplingSurfaceNormalDataPointsFilter::keepEigenVectors)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &SamplingSurfaceNormalDataPointsFilter::filter)
+                .def("inPlaceFilter", &SamplingSurfaceNormalDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/sampling_surface_normal.h b/python/datapointsfilters/sampling_surface_normal.h
new file mode 100644
index 00000000..f94d5129
--- /dev/null
+++ b/python/datapointsfilters/sampling_surface_normal.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_SAMPLING_SURFACE_NORMAL_H
+#define PYTHON_DATAPOINTSFILTERS_SAMPLING_SURFACE_NORMAL_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindSamplingSurfaceNormal(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_SAMPLING_SURFACE_NORMAL_H
diff --git a/python/datapointsfilters/shadow.cpp b/python/datapointsfilters/shadow.cpp
new file mode 100644
index 00000000..a19f8bd7
--- /dev/null
+++ b/python/datapointsfilters/shadow.cpp
@@ -0,0 +1,25 @@
+#include "shadow.h"
+
+#include "DataPointsFilters/Shadow.h"
+
+namespace python
+{
+    namespace datapointsfilters
+    {
+        void pybindShadow(py::module& p_module)
+        {
+            using ShadowDataPointsFilter = ShadowDataPointsFilter<ScalarType>; // instantiate the filter template for the module's scalar type
+            py::class_<ShadowDataPointsFilter, std::shared_ptr<ShadowDataPointsFilter>, DataPointsFilter>(p_module, "ShadowDataPointsFilter", "Shadow filter, remove ghost points appearing on edges")
+
+                .def_static("description", &ShadowDataPointsFilter::description)
+                .def_static("availableParameters", &ShadowDataPointsFilter::availableParameters)
+
+                .def_readonly("eps", &ShadowDataPointsFilter::eps)
+
+                .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+                .def("filter", &ShadowDataPointsFilter::filter)
+                .def("inPlaceFilter", &ShadowDataPointsFilter::inPlaceFilter);
+        }
+    }
+}
diff --git a/python/datapointsfilters/shadow.h b/python/datapointsfilters/shadow.h
new file mode 100644
index 00000000..ff4b5a0c
--- /dev/null
+++ b/python/datapointsfilters/shadow.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_SHADOW_H
+#define PYTHON_DATAPOINTSFILTERS_SHADOW_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindShadow(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_SHADOW_H
diff --git a/python/datapointsfilters/simple_sensor_noise.cpp b/python/datapointsfilters/simple_sensor_noise.cpp
new file mode 100644
index 00000000..53585d3d
--- /dev/null
+++ b/python/datapointsfilters/simple_sensor_noise.cpp
@@ -0,0 +1,26 @@
+#include "simple_sensor_noise.h"
+
+#include "DataPointsFilters/SimpleSensorNoise.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindSimpleSensorNoise(py::module& p_module)
+ {
+ using SimpleSensorNoiseDataPointsFilter = SimpleSensorNoiseDataPointsFilter<ScalarType>;
+ py::class_<SimpleSensorNoiseDataPointsFilter, std::shared_ptr<SimpleSensorNoiseDataPointsFilter>, DataPointsFilter>(p_module, "SimpleSensorNoiseDataPointsFilter", "Sick LMS-xxx noise model")
+
+ .def_static("description", &SimpleSensorNoiseDataPointsFilter::description)
+ .def_static("availableParameters", &SimpleSensorNoiseDataPointsFilter::availableParameters)
+
+ .def_readonly("sensorType", &SimpleSensorNoiseDataPointsFilter::sensorType)
+ .def_readonly("gain", &SimpleSensorNoiseDataPointsFilter::gain)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+ .def("filter", &SimpleSensorNoiseDataPointsFilter::filter)
+ .def("inPlaceFilter", &SimpleSensorNoiseDataPointsFilter::inPlaceFilter);
+ }
+ }
+}
diff --git a/python/datapointsfilters/simple_sensor_noise.h b/python/datapointsfilters/simple_sensor_noise.h
new file mode 100644
index 00000000..46288222
--- /dev/null
+++ b/python/datapointsfilters/simple_sensor_noise.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_SIMPLE_SENSOR_NOISE_H
+#define PYTHON_DATAPOINTSFILTERS_SIMPLE_SENSOR_NOISE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindSimpleSensorNoise(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_SIMPLE_SENSOR_NOISE_H
diff --git a/python/datapointsfilters/sphericality.cpp b/python/datapointsfilters/sphericality.cpp
new file mode 100644
index 00000000..89114e3b
--- /dev/null
+++ b/python/datapointsfilters/sphericality.cpp
@@ -0,0 +1,26 @@
+#include "sphericality.h"
+
+#include "DataPointsFilters/Sphericality.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindSphericality(py::module& p_module)
+ {
+ using SphericalityDataPointsFilter = SphericalityDataPointsFilter<ScalarType>;
+ py::class_<SphericalityDataPointsFilter, std::shared_ptr<SphericalityDataPointsFilter>, DataPointsFilter>(p_module, "SphericalityDataPointsFilter")
+
+ .def_static("description", &SphericalityDataPointsFilter::description)
+ .def_static("availableParameters", &SphericalityDataPointsFilter::availableParameters)
+
+ .def_readonly("keepUnstructureness", &SphericalityDataPointsFilter::keepUnstructureness)
+ .def_readonly("keepStructureness", &SphericalityDataPointsFilter::keepStructureness)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+ .def("filter", &SphericalityDataPointsFilter::filter)
+ .def("inPlaceFilter", &SphericalityDataPointsFilter::inPlaceFilter);
+ }
+ }
+}
diff --git a/python/datapointsfilters/sphericality.h b/python/datapointsfilters/sphericality.h
new file mode 100644
index 00000000..9e52bc31
--- /dev/null
+++ b/python/datapointsfilters/sphericality.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_SPHERICALITY_H
+#define PYTHON_DATAPOINTSFILTERS_SPHERICALITY_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindSphericality(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_SPHERICALITY_H
diff --git a/python/datapointsfilters/surface_normal.cpp b/python/datapointsfilters/surface_normal.cpp
new file mode 100644
index 00000000..b90db5be
--- /dev/null
+++ b/python/datapointsfilters/surface_normal.cpp
@@ -0,0 +1,35 @@
+#include "surface_normal.h"
+
+#include "DataPointsFilters/SurfaceNormal.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindSurfaceNormal(py::module& p_module)
+ {
+ using SurfaceNormalDataPointsFilter = SurfaceNormalDataPointsFilter<ScalarType>;
+ py::class_<SurfaceNormalDataPointsFilter, std::shared_ptr<SurfaceNormalDataPointsFilter>, DataPointsFilter>(p_module, "SurfaceNormalDataPointsFilter")
+
+ .def_static("description", &SurfaceNormalDataPointsFilter::description)
+ .def_static("availableParameters", &SurfaceNormalDataPointsFilter::availableParameters)
+
+ .def_readonly("knn", &SurfaceNormalDataPointsFilter::knn)
+ .def_readonly("maxDist", &SurfaceNormalDataPointsFilter::maxDist)
+ .def_readonly("epsilon", &SurfaceNormalDataPointsFilter::epsilon)
+ .def_readonly("keepNormals", &SurfaceNormalDataPointsFilter::keepNormals)
+ .def_readonly("keepDensities", &SurfaceNormalDataPointsFilter::keepDensities)
+ .def_readonly("keepEigenValues", &SurfaceNormalDataPointsFilter::keepEigenValues)
+ .def_readonly("keepEigenVectors", &SurfaceNormalDataPointsFilter::keepEigenVectors)
+ .def_readonly("keepMatchedIds", &SurfaceNormalDataPointsFilter::keepMatchedIds)
+ .def_readonly("keepMeanDist", &SurfaceNormalDataPointsFilter::keepMeanDist)
+ .def_readonly("sortEigen", &SurfaceNormalDataPointsFilter::sortEigen)
+ .def_readonly("smoothNormals", &SurfaceNormalDataPointsFilter::smoothNormals)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters(), "Constructor, uses parameter interface")
+
+ .def("filter", &SurfaceNormalDataPointsFilter::filter)
+ .def("inPlaceFilter", &SurfaceNormalDataPointsFilter::inPlaceFilter);
+ }
+ }
+}
diff --git a/python/datapointsfilters/surface_normal.h b/python/datapointsfilters/surface_normal.h
new file mode 100644
index 00000000..af24feea
--- /dev/null
+++ b/python/datapointsfilters/surface_normal.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_DATAPOINTSFILTERS_SURFACE_NORMAL_H
+#define PYTHON_DATAPOINTSFILTERS_SURFACE_NORMAL_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace datapointsfilters
+ {
+ void pybindSurfaceNormal(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_DATAPOINTSFILTERS_SURFACE_NORMAL_H
diff --git a/python/errorminimizers/identity.cpp b/python/errorminimizers/identity.cpp
new file mode 100644
index 00000000..b7f37b00
--- /dev/null
+++ b/python/errorminimizers/identity.cpp
@@ -0,0 +1,17 @@
+#include "identity.h"
+
+#include "pointmatcher/ErrorMinimizersImpl.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindIdentity(py::module& p_module)
+ {
+ using IdentityErrorMinimizer = ErrorMinimizersImpl<ScalarType>::IdentityErrorMinimizer;
+ py::class_<IdentityErrorMinimizer, std::shared_ptr<IdentityErrorMinimizer>, ErrorMinimizer>(p_module, "IdentityErrorMinimizer")
+ .def(py::init<>()).def_static("description", &IdentityErrorMinimizer::description)
+ .def("compute", &IdentityErrorMinimizer::compute, py::arg("mPts"));
+ }
+ }
+}
diff --git a/python/errorminimizers/identity.h b/python/errorminimizers/identity.h
new file mode 100644
index 00000000..93d65e5b
--- /dev/null
+++ b/python/errorminimizers/identity.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_ERRORMINIMIZERS_IDENTITY_H
+#define PYTHON_ERRORMINIMIZERS_IDENTITY_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindIdentity(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_ERRORMINIMIZERS_IDENTITY_H
diff --git a/python/errorminimizers/point_to_plane.cpp b/python/errorminimizers/point_to_plane.cpp
new file mode 100644
index 00000000..f8399e77
--- /dev/null
+++ b/python/errorminimizers/point_to_plane.cpp
@@ -0,0 +1,31 @@
+#include "point_to_plane.h"
+
+#include "pointmatcher/ErrorMinimizersImpl.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPlane(py::module& p_module)
+ {
+ using PointToPlaneErrorMinimizer = ErrorMinimizersImpl<ScalarType>::PointToPlaneErrorMinimizer;
+ py::class_<PointToPlaneErrorMinimizer, std::shared_ptr<PointToPlaneErrorMinimizer>, ErrorMinimizer>(p_module, "PointToPlaneErrorMinimizer")
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters())
+ .def(py::init<const ParametersDoc, const Parameters&>(), py::arg("paramsDoc"), py::arg("params") = Parameters())
+
+ .def_readonly("force2D", &PointToPlaneErrorMinimizer::force2D)
+ .def_readonly("force4DOF", &PointToPlaneErrorMinimizer::force4DOF)
+
+ .def_static("description", &PointToPlaneErrorMinimizer::description)
+ .def_static("availableParameters", &PointToPlaneErrorMinimizer::availableParameters)
+
+ .def("name", &PointToPlaneErrorMinimizer::name)
+ .def("compute", &PointToPlaneErrorMinimizer::compute, py::arg("mPts"))
+ .def("compute_in_place", &PointToPlaneErrorMinimizer::compute_in_place, py::arg("mPts"))
+ .def("getResidualError", &PointToPlaneErrorMinimizer::getResidualError, py::arg("filteredReading"), py::arg("filteredReference"), py::arg("outlierWeights"), py::arg("matches"))
+ .def("getOverlap", &PointToPlaneErrorMinimizer::getOverlap)
+
+ .def_static("computeResidualError", &PointToPlaneErrorMinimizer::computeResidualError, py::arg("mPts"), py::arg("force2D"));
+ }
+ }
+}
diff --git a/python/errorminimizers/point_to_plane.h b/python/errorminimizers/point_to_plane.h
new file mode 100644
index 00000000..fa2ea14b
--- /dev/null
+++ b/python/errorminimizers/point_to_plane.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_ERRORMINIMIZERS_POINT_TO_PLANE_H
+#define PYTHON_ERRORMINIMIZERS_POINT_TO_PLANE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPlane(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_ERRORMINIMIZERS_POINT_TO_PLANE_H
diff --git a/python/errorminimizers/point_to_plane_with_cov.cpp b/python/errorminimizers/point_to_plane_with_cov.cpp
new file mode 100644
index 00000000..88edc480
--- /dev/null
+++ b/python/errorminimizers/point_to_plane_with_cov.cpp
@@ -0,0 +1,28 @@
+#include "point_to_plane_with_cov.h"
+
+#include "pointmatcher/ErrorMinimizersImpl.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPlaneWithCov(py::module& p_module)
+ {
+ using PointToPlaneErrorMinimizer = ErrorMinimizersImpl<ScalarType>::PointToPlaneErrorMinimizer;
+ using PointToPlaneWithCovErrorMinimizer = ErrorMinimizersImpl<ScalarType>::PointToPlaneWithCovErrorMinimizer;
+ py::class_<PointToPlaneWithCovErrorMinimizer, std::shared_ptr<PointToPlaneWithCovErrorMinimizer>, PointToPlaneErrorMinimizer>(p_module, "PointToPlaneWithCovErrorMinimizer")
+ .def("name", &PointToPlaneWithCovErrorMinimizer::name)
+
+ .def_static("description", &PointToPlaneWithCovErrorMinimizer::description)
+ .def_static("availableParameters", &PointToPlaneWithCovErrorMinimizer::availableParameters)
+
+ .def_readonly("sensorStdDev", &PointToPlaneWithCovErrorMinimizer::sensorStdDev)
+ .def_readwrite("covMatrix", &PointToPlaneWithCovErrorMinimizer::covMatrix)
+
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters())
+ .def("compute", &PointToPlaneWithCovErrorMinimizer::compute, py::arg("mPts"))
+ .def("getCovariance", &PointToPlaneWithCovErrorMinimizer::getCovariance)
+ .def("estimateCovariance", &PointToPlaneWithCovErrorMinimizer::estimateCovariance, py::arg("mPts"), py::arg("transformation"));
+ }
+ }
+}
diff --git a/python/errorminimizers/point_to_plane_with_cov.h b/python/errorminimizers/point_to_plane_with_cov.h
new file mode 100644
index 00000000..714b9d77
--- /dev/null
+++ b/python/errorminimizers/point_to_plane_with_cov.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_ERRORMINIMIZERS_POINT_TO_PLANE_WITH_COV_H
+#define PYTHON_ERRORMINIMIZERS_POINT_TO_PLANE_WITH_COV_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPlaneWithCov(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_ERRORMINIMIZERS_POINT_TO_PLANE_WITH_COV_H
diff --git a/python/errorminimizers/point_to_point.cpp b/python/errorminimizers/point_to_point.cpp
new file mode 100644
index 00000000..2bb5f2b3
--- /dev/null
+++ b/python/errorminimizers/point_to_point.cpp
@@ -0,0 +1,22 @@
+#include "point_to_point.h"
+
+#include "pointmatcher/ErrorMinimizersImpl.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPoint(py::module& p_module)
+ {
+ using PointToPointErrorMinimizer = ErrorMinimizersImpl<ScalarType>::PointToPointErrorMinimizer;
+ py::class_<PointToPointErrorMinimizer, std::shared_ptr<PointToPointErrorMinimizer>, ErrorMinimizer>(p_module, "PointToPointErrorMinimizer")
+ .def(py::init<>())
+ .def(py::init<const std::string&, const ParametersDoc, const Parameters&>(), py::arg("className"), py::arg("paramsDoc"), py::arg("params"))
+ .def("compute", &PointToPointErrorMinimizer::compute, py::arg("mPts"))
+ .def("compute_in_place", &PointToPointErrorMinimizer::compute_in_place, py::arg("mPts"))
+ .def("getResidualError", &PointToPointErrorMinimizer::getResidualError, py::arg("filteredReading"), py::arg("filteredReference"), py::arg("outlierWeights"), py::arg("matches"))
+ .def("getOverlap", &PointToPointErrorMinimizer::getOverlap)
+ .def_static("computeResidualError", &PointToPointErrorMinimizer::computeResidualError, py::arg("mPts"));
+ }
+ }
+}
diff --git a/python/errorminimizers/point_to_point.h b/python/errorminimizers/point_to_point.h
new file mode 100644
index 00000000..c7dbdd6b
--- /dev/null
+++ b/python/errorminimizers/point_to_point.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_H
+#define PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPoint(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_H
diff --git a/python/errorminimizers/point_to_point_similarity.cpp b/python/errorminimizers/point_to_point_similarity.cpp
new file mode 100644
index 00000000..9bc843de
--- /dev/null
+++ b/python/errorminimizers/point_to_point_similarity.cpp
@@ -0,0 +1,19 @@
+#include "point_to_point_similarity.h"
+
+#include "pointmatcher/ErrorMinimizersImpl.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPointSimilarity(py::module& p_module)
+ {
+ using PointToPointSimilarityErrorMinimizer = ErrorMinimizersImpl<ScalarType>::PointToPointSimilarityErrorMinimizer;
+ py::class_<PointToPointSimilarityErrorMinimizer, std::shared_ptr<PointToPointSimilarityErrorMinimizer>, ErrorMinimizer>(p_module, "PointToPointSimilarityErrorMinimizer")
+ .def(py::init<>()).def_static("description", &PointToPointSimilarityErrorMinimizer::description)
+ .def("compute", &PointToPointSimilarityErrorMinimizer::compute, py::arg("mPts"))
+ .def("getResidualError", &PointToPointSimilarityErrorMinimizer::getResidualError, py::arg("filteredReading"), py::arg("filteredReference"), py::arg("outlierWeights"), py::arg("matches"))
+ .def("getOverlap", &PointToPointSimilarityErrorMinimizer::getOverlap);
+ }
+ }
+}
diff --git a/python/errorminimizers/point_to_point_similarity.h b/python/errorminimizers/point_to_point_similarity.h
new file mode 100644
index 00000000..87ad7485
--- /dev/null
+++ b/python/errorminimizers/point_to_point_similarity.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_SIMILARITY_H
+#define PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_SIMILARITY_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPointSimilarity(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_SIMILARITY_H
diff --git a/python/errorminimizers/point_to_point_with_cov.cpp b/python/errorminimizers/point_to_point_with_cov.cpp
new file mode 100644
index 00000000..0420276e
--- /dev/null
+++ b/python/errorminimizers/point_to_point_with_cov.cpp
@@ -0,0 +1,24 @@
+#include "point_to_point_with_cov.h"
+
+#include "pointmatcher/ErrorMinimizersImpl.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPointWithCov(py::module& p_module)
+ {
+ using PointToPointErrorMinimizer = ErrorMinimizersImpl<ScalarType>::PointToPointErrorMinimizer;
+ using PointToPointWithCovErrorMinimizer = ErrorMinimizersImpl<ScalarType>::PointToPointWithCovErrorMinimizer;
+ py::class_<PointToPointWithCovErrorMinimizer, std::shared_ptr<PointToPointWithCovErrorMinimizer>, PointToPointErrorMinimizer>(p_module, "PointToPointWithCovErrorMinimizer")
+ .def(py::init<const Parameters&>(), py::arg("params") = Parameters())
+ .def_readonly("sensorStdDev", &PointToPointWithCovErrorMinimizer::sensorStdDev)
+ .def_readwrite("covMatrix", &PointToPointWithCovErrorMinimizer::covMatrix)
+ .def_static("description", &PointToPointWithCovErrorMinimizer::description)
+ .def_static("availableParameters", &PointToPointWithCovErrorMinimizer::availableParameters)
+ .def("compute", &PointToPointWithCovErrorMinimizer::compute, py::arg("mPts"))
+ .def("getCovariance", &PointToPointWithCovErrorMinimizer::getCovariance)
+ .def("estimateCovariance", &PointToPointWithCovErrorMinimizer::estimateCovariance, py::arg("mPts"), py::arg("transformation"));
+ }
+ }
+}
diff --git a/python/errorminimizers/point_to_point_with_cov.h b/python/errorminimizers/point_to_point_with_cov.h
new file mode 100644
index 00000000..5e1e0ae1
--- /dev/null
+++ b/python/errorminimizers/point_to_point_with_cov.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_WITH_COV_H
+#define PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_WITH_COV_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace errorminimizers
+ {
+ void pybindPointToPointWithCov(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_ERRORMINIMIZERS_POINT_TO_POINT_WITH_COV_H
diff --git a/python/modules/data_points_filters_module.cpp b/python/modules/data_points_filters_module.cpp
new file mode 100644
index 00000000..99ab1eff
--- /dev/null
+++ b/python/modules/data_points_filters_module.cpp
@@ -0,0 +1,62 @@
+#include "data_points_filters_module.h"
+
+#include "datapointsfilters/bounding_box.h"
+#include "datapointsfilters/covariance_sampling.h"
+#include "datapointsfilters/cut_at_descriptor_threshold.h"
+#include "datapointsfilters/distance_limit.h"
+#include "datapointsfilters/ellipsoids.h"
+#include "datapointsfilters/fix_step_sampling.h"
+#include "datapointsfilters/gestalt.h"
+#include "datapointsfilters/identity.h"
+#include "datapointsfilters/incidence_angle.h"
+#include "datapointsfilters/max_density.h"
+#include "datapointsfilters/max_pointcount.h"
+#include "datapointsfilters/max_quantile_on_axis.h"
+#include "datapointsfilters/normal_space.h"
+#include "datapointsfilters/observation_direction.h"
+#include "datapointsfilters/octree_grid.h"
+#include "datapointsfilters/orient_normals.h"
+#include "datapointsfilters/random_sampling.h"
+#include "datapointsfilters/remove_nan.h"
+#include "datapointsfilters/remove_sensor_bias.h"
+#include "datapointsfilters/sampling_surface_normal.h"
+#include "datapointsfilters/shadow.h"
+#include "datapointsfilters/simple_sensor_noise.h"
+#include "datapointsfilters/sphericality.h"
+#include "datapointsfilters/surface_normal.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindDataPointsFiltersModule(py::module& p_module)
+ {
+ py::module datapointsfilterModule = p_module.def_submodule("datapointsfilters");
+
+ datapointsfilters::pybindBoundingBox(datapointsfilterModule);
+ datapointsfilters::pybindCovarianceSampling(datapointsfilterModule);
+ datapointsfilters::pybindCutAtDescriptorThreshold(datapointsfilterModule);
+ datapointsfilters::pybindDistanceLimit(datapointsfilterModule);
+ datapointsfilters::pybindEllipsoids(datapointsfilterModule);
+ datapointsfilters::pybindFixStepSampling(datapointsfilterModule);
+ datapointsfilters::pybindGestalt(datapointsfilterModule);
+ datapointsfilters::pybindIdentity(datapointsfilterModule);
+ datapointsfilters::pybindIncidenceAngle(datapointsfilterModule);
+ datapointsfilters::pybindMaxDensity(datapointsfilterModule);
+ datapointsfilters::pybindMaxPointCount(datapointsfilterModule);
+ datapointsfilters::pybindMaxQuantileOnAxis(datapointsfilterModule);
+ datapointsfilters::pybindNormalSpace(datapointsfilterModule);
+ datapointsfilters::pybindObservationDirection(datapointsfilterModule);
+ datapointsfilters::pybindOctreeGrid(datapointsfilterModule);
+ datapointsfilters::pybindOrientNormals(datapointsfilterModule);
+ datapointsfilters::pybindRandomSampling(datapointsfilterModule);
+ datapointsfilters::pybindRemoveNaN(datapointsfilterModule);
+ datapointsfilters::pybindRemoveSensorBias(datapointsfilterModule);
+ datapointsfilters::pybindSamplingSurfaceNormal(datapointsfilterModule);
+ datapointsfilters::pybindShadow(datapointsfilterModule);
+ datapointsfilters::pybindSimpleSensorNoise(datapointsfilterModule);
+ datapointsfilters::pybindSphericality(datapointsfilterModule);
+ datapointsfilters::pybindSurfaceNormal(datapointsfilterModule);
+ }
+ }
+}
diff --git a/python/modules/data_points_filters_module.h b/python/modules/data_points_filters_module.h
new file mode 100644
index 00000000..92ef66d1
--- /dev/null
+++ b/python/modules/data_points_filters_module.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_MODULES_DATA_POINTS_FILTERS_MODULE_H
+#define PYTHON_MODULES_DATA_POINTS_FILTERS_MODULE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindDataPointsFiltersModule(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_MODULES_DATA_POINTS_FILTERS_MODULE_H
diff --git a/python/modules/error_minimizers_module.cpp b/python/modules/error_minimizers_module.cpp
new file mode 100644
index 00000000..c428665f
--- /dev/null
+++ b/python/modules/error_minimizers_module.cpp
@@ -0,0 +1,26 @@
+#include "error_minimizers_module.h"
+
+#include "errorminimizers/identity.h"
+#include "errorminimizers/point_to_plane.h"
+#include "errorminimizers/point_to_plane_with_cov.h"
+#include "errorminimizers/point_to_point.h"
+#include "errorminimizers/point_to_point_similarity.h"
+#include "errorminimizers/point_to_point_with_cov.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindErrorMinimizersModule(py::module& p_module)
+ {
+ py::module errorminizersModule = p_module.def_submodule("errorminimizers");
+
+ errorminimizers::pybindIdentity(errorminizersModule);
+ errorminimizers::pybindPointToPlane(errorminizersModule);
+ errorminimizers::pybindPointToPlaneWithCov(errorminizersModule);
+ errorminimizers::pybindPointToPoint(errorminizersModule);
+ errorminimizers::pybindPointToPointSimilarity(errorminizersModule);
+ errorminimizers::pybindPointToPointWithCov(errorminizersModule);
+ }
+ }
+}
diff --git a/python/modules/error_minimizers_module.h b/python/modules/error_minimizers_module.h
new file mode 100644
index 00000000..1a1702aa
--- /dev/null
+++ b/python/modules/error_minimizers_module.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_MODULES_ERROR_MINIMIZERS_MODULE_H
+#define PYTHON_MODULES_ERROR_MINIMIZERS_MODULE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindErrorMinimizersModule(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_MODULES_ERROR_MINIMIZERS_MODULE_H
diff --git a/python/modules/point_matcher_module.cpp b/python/modules/point_matcher_module.cpp
new file mode 100644
index 00000000..349f871c
--- /dev/null
+++ b/python/modules/point_matcher_module.cpp
@@ -0,0 +1,20 @@
+#include "point_matcher_module.h"
+
+#include "pointmatcher/point_matcher.h"
+#include "pointmatcher/impl.h"
+#include "pointmatcher/io.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindPointMatcherModule(py::module& p_module)
+ {
+ py::module pointmatcherModule = p_module.def_submodule("pointmatcher");
+
+ pointmatcher::pybindPointMatcher(pointmatcherModule);
+ pointmatcher::pybindIO(pointmatcherModule);
+ pointmatcher::pybindImpl(pointmatcherModule);
+ }
+ }
+}
diff --git a/python/modules/point_matcher_module.h b/python/modules/point_matcher_module.h
new file mode 100644
index 00000000..dcd5abe3
--- /dev/null
+++ b/python/modules/point_matcher_module.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_MODULES_POINT_MATCHER_MODULE_H
+#define PYTHON_MODULES_POINT_MATCHER_MODULE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindPointMatcherModule(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_MODULES_POINT_MATCHER_MODULE_H
diff --git a/python/modules/point_matcher_support_module.cpp b/python/modules/point_matcher_support_module.cpp
new file mode 100644
index 00000000..e199ea1e
--- /dev/null
+++ b/python/modules/point_matcher_support_module.cpp
@@ -0,0 +1,44 @@
+#include "point_matcher_support_module.h"
+
+#include "pointmatchersupport/bibliography.h"
+#include "pointmatchersupport/logger.h"
+#include "pointmatchersupport/logger_impl.h"
+#include "pointmatchersupport/parametrizable.h"
+#include "pointmatchersupport/registrar.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindPointMatcherSupportModule(py::module& p_module)
+ {
+ py::module pointmatchersupportModule = p_module.def_submodule("pointmatchersupport");
+
+ using InvalidModuleType = pms::InvalidModuleType;
+ py::register_exception<InvalidModuleType>(pointmatchersupportModule, "InvalidModuleType");
+
+ using TransformationError = pms::TransformationError;
+ py::register_exception<TransformationError>(pointmatchersupportModule, "TransformationError");
+
+ using ConfigurationError = pms::ConfigurationError;
+ py::register_exception<ConfigurationError>(pointmatchersupportModule, "ConfigurationError");
+
+ using InvalidElement = pms::InvalidElement;
+ py::register_exception<InvalidElement>(pointmatchersupportModule, "InvalidElement");
+
+ pointmatchersupport::pybindBibliography(pointmatchersupportModule);
+ pointmatchersupport::pybindParametrizable(pointmatchersupportModule);
+ pointmatchersupport::pybindLogger(pointmatchersupportModule);
+ pointmatchersupport::pybindLoggerImpl(pointmatchersupportModule);
+ pointmatchersupport::pybindRegistrar(pointmatchersupportModule);
+
+ pointmatchersupportModule
+ .def("setLogger", &pms::setLogger, py::arg("newLogger"), "Set a new logger, protected by a mutex")
+ .def("validateFile", &pms::validateFile, py::arg("fileName"), "Throw a runtime_error exception if fileName cannot be opened");
+
+ using CsvElements = pms::CsvElements;
+ py::bind_map<CsvElements>(pointmatchersupportModule, "CsvElements", "Data from a CSV file")
+ .def("clear", &CsvElements::clear);
+ }
+ }
+}
diff --git a/python/modules/point_matcher_support_module.h b/python/modules/point_matcher_support_module.h
new file mode 100644
index 00000000..9a26bd65
--- /dev/null
+++ b/python/modules/point_matcher_support_module.h
@@ -0,0 +1,14 @@
+#ifndef PYTHON_MODULES_POINT_MATCHER_SUPPORT_MODULE_H
+#define PYTHON_MODULES_POINT_MATCHER_SUPPORT_MODULE_H
+
+#include "pypoint_matcher_helper.h"
+
+namespace python
+{
+ namespace modules
+ {
+ void pybindPointMatcherSupportModule(py::module& p_module);
+ }
+}
+
+#endif //PYTHON_MODULES_POINT_MATCHER_SUPPORT_MODULE_H
diff --git a/python/pointmatcher/data_points.cpp b/python/pointmatcher/data_points.cpp
new file mode 100644
index 00000000..9f7f12e8
--- /dev/null
+++ b/python/pointmatcher/data_points.cpp
@@ -0,0 +1,171 @@
+#include "data_points.h"
+
+namespace python
+{
+ namespace pointmatcher
+ {
+ void pybindDataPoints(py::class_<PM>& p_class)
+ {
+ using View = DataPoints::View;
+ using TimeView = DataPoints::TimeView;
+ using ConstView = DataPoints::ConstView;
+ using TimeConstView = DataPoints::TimeConstView;
+
+ py::class_<DataPoints> pyDataPoints(p_class, "DataPoints");
+
+ pyDataPoints.doc() = R"pbdoc(
+A point cloud
+
+For every point, it has features and, optionally, descriptors.
+Features are typically the coordinates of the point in the space.
+Descriptors contain information attached to the point, such as its color, its normal vector, etc.
+In both features and descriptors, every point can have multiple channels.
+Every channel has a dimension and a name.
+For instance, a typical 3D cloud might have the channels \c x, \c y, \c z, \c w of dimension 1 as features (using homogeneous coordinates), and the channel \c normal of size 3 as descriptor.
+There are no sub-channels, such as \c normal.x, for the sake of simplicity.
+Moreover, the position of the points is in homogeneous coordinates because they need both translation and rotation, while the normals need only rotation.
+All channels contain scalar values of type ScalarType.)pbdoc";
+
+ py::class_