GICP: parallel covariance computation
mvieth committed May 18, 2024
1 parent 2d5101a commit 44bdfc2
Showing 2 changed files with 30 additions and 5 deletions.
registration/include/pcl/registration/gicp.h (9 additions, 0 deletions)
@@ -125,6 +125,7 @@ class GeneralizedIterativeClosestPoint
     max_iterations_ = 200;
     transformation_epsilon_ = 5e-4;
     corr_dist_threshold_ = 5.;
+    setNumberOfThreads(0);
     rigid_transformation_estimation_ = [this](const PointCloudSource& cloud_src,
                                               const pcl::Indices& indices_src,
                                               const PointCloudTarget& cloud_tgt,
@@ -355,6 +356,11 @@ class GeneralizedIterativeClosestPoint
     return rotation_gradient_tolerance_;
   }
 
+  /** \brief Initialize the scheduler and set the number of threads to use.
+   * \param nr_threads the number of hardware threads to use (0 sets the value back to automatic)
+   */
+  void
+  setNumberOfThreads (unsigned int nr_threads = 0);
 protected:
   /** \brief The number of neighbors used for covariances computation.
    * default: 20
@@ -508,6 +514,9 @@ class GeneralizedIterativeClosestPoint
                           Eigen::Matrix3d& ddR_dTheta_dTheta,
                           Eigen::Matrix3d& ddR_dTheta_dPsi,
                           Eigen::Matrix3d& ddR_dPsi_dPsi) const;
+
+  /** \brief The number of threads the scheduler should use. */
+  unsigned int threads_;
 };
 } // namespace pcl
 
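For orientation, this is roughly how a caller would exercise the new setter; a minimal sketch only, in which the input clouds are hypothetical placeholders (filling them is assumed, not part of the commit):

    #include <pcl/point_types.h>
    #include <pcl/registration/gicp.h>

    int main()
    {
      // Hypothetical, pre-filled input clouds (loading them is assumed).
      pcl::PointCloud<pcl::PointXYZ>::Ptr source(new pcl::PointCloud<pcl::PointXYZ>);
      pcl::PointCloud<pcl::PointXYZ>::Ptr target(new pcl::PointCloud<pcl::PointXYZ>);

      pcl::GeneralizedIterativeClosestPoint<pcl::PointXYZ, pcl::PointXYZ> gicp;
      gicp.setInputSource(source);
      gicp.setInputTarget(target);
      // New in this commit: 0 (also the default) resolves to omp_get_num_procs().
      gicp.setNumberOfThreads(4);

      pcl::PointCloud<pcl::PointXYZ> aligned;
      gicp.align(aligned); // covariance computation now runs on 4 threads
      return 0;
    }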
registration/include/pcl/registration/impl/gicp.hpp (21 additions, 5 deletions)
@@ -45,6 +45,22 @@
 
 namespace pcl {
 
+template <typename PointSource, typename PointTarget, typename Scalar> void
+GeneralizedIterativeClosestPoint<PointSource, PointTarget, Scalar>::setNumberOfThreads (unsigned int nr_threads)
+{
+#ifdef _OPENMP
+  if (nr_threads == 0)
+    threads_ = omp_get_num_procs();
+  else
+    threads_ = nr_threads;
+  PCL_DEBUG ("[pcl::GeneralizedIterativeClosestPoint::setNumberOfThreads] Setting number of threads to %u.\n", threads_);
+#else
+  threads_ = 1;
+  if (nr_threads != 1)
+    PCL_WARN ("[pcl::GeneralizedIterativeClosestPoint::setNumberOfThreads] Parallelization is requested, but OpenMP is not available! Continuing without parallelization.\n");
+#endif // _OPENMP
+}
+
 template <typename PointSource, typename PointTarget, typename Scalar>
 template <typename PointT>
 void
@@ -62,18 +78,17 @@ GeneralizedIterativeClosestPoint<PointSource, PointTarget, Scalar>::computeCovariances
   }
 
   Eigen::Vector3d mean;
+  Eigen::Matrix3d cov;
   pcl::Indices nn_indices(k_correspondences_);
   std::vector<float> nn_dist_sq(k_correspondences_);
 
   // We should never get there but who knows
   if (cloud_covariances.size() < cloud->size())
     cloud_covariances.resize(cloud->size());
 
-  auto matrices_iterator = cloud_covariances.begin();
-  for (auto points_iterator = cloud->begin(); points_iterator != cloud->end();
-       ++points_iterator, ++matrices_iterator) {
-    const PointT& query_point = *points_iterator;
-    Eigen::Matrix3d& cov = *matrices_iterator;
+#pragma omp parallel for default(none) num_threads(threads_) schedule(dynamic, 32) shared(cloud, cloud_covariances, kdtree) firstprivate(mean, cov, nn_indices, nn_dist_sq)
+  for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(cloud->size()); ++i) {
+    const PointT& query_point = (*cloud)[i];
     // Zero out the cov and mean
     cov.setZero();
     mean.setZero();
@@ -124,6 +139,7 @@ GeneralizedIterativeClosestPoint<PointSource, PointTarget, Scalar>::computeCovariances
       v = gicp_epsilon_;
       cov += v * col * col.transpose();
     }
+    cloud_covariances[i] = cov;
   }
 }
 
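A note on the parallel loop above: schedule(dynamic, 32) hands out chunks of 32 points at a time so threads stay balanced even when nearest-neighbor searches vary in cost, and firstprivate gives every thread its own copy of the scratch variables (mean, cov, nn_indices, nn_dist_sq), so no iteration reallocates them and no two threads share a buffer. Each iteration writes only to its own slot cloud_covariances[i], so the one shared container is written without races. The same pattern in a minimal standalone sketch (illustrative only, not part of the commit; all names here are made up):

    #include <cstddef>
    #include <vector>

    // Illustrative stand-in for the covariance loop: per-thread scratch via
    // firstprivate, race-free writes into distinct output slots.
    std::vector<double> parallel_transform(const std::vector<double>& input,
                                           unsigned int threads)
    {
      std::vector<double> out(input.size());
      double scratch = 0.0; // plays the role of mean/cov/nn_indices/nn_dist_sq
    #pragma omp parallel for default(none) num_threads(threads) schedule(dynamic, 32) \
        shared(input, out) firstprivate(scratch)
      for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(input.size()); ++i) {
        scratch = input[i] * input[i]; // each thread mutates only its own copy
        out[i] = scratch;              // distinct slot per iteration: no race
      }
      return out;
    }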
