Skip to content

Commit

Permalink
Merge pull request #6046 from mvieth/gicp_parallel_cov
Browse files Browse the repository at this point in the history
GICP: parallel covariance computation
  • Loading branch information
mvieth committed May 28, 2024
2 parents e89f5ac + d05e8f6 commit 7e37440
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 5 deletions.
11 changes: 11 additions & 0 deletions registration/include/pcl/registration/gicp.h
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,7 @@ class GeneralizedIterativeClosestPoint
max_iterations_ = 200;
transformation_epsilon_ = 5e-4;
corr_dist_threshold_ = 5.;
setNumberOfThreads(0);
rigid_transformation_estimation_ = [this](const PointCloudSource& cloud_src,
const pcl::Indices& indices_src,
const PointCloudTarget& cloud_tgt,
Expand Down Expand Up @@ -371,6 +372,13 @@ class GeneralizedIterativeClosestPoint
return rotation_gradient_tolerance_;
}

/** \brief Initialize the scheduler and set the number of threads to use.
* \param nr_threads the number of hardware threads to use (0 sets the value back to
* automatic)
*/
void
setNumberOfThreads(unsigned int nr_threads = 0);

protected:
/** \brief The number of neighbors used for covariances computation.
* default: 20
Expand Down Expand Up @@ -524,6 +532,9 @@ class GeneralizedIterativeClosestPoint
Eigen::Matrix3d& ddR_dTheta_dTheta,
Eigen::Matrix3d& ddR_dTheta_dPsi,
Eigen::Matrix3d& ddR_dPsi_dPsi) const;

/** \brief The number of threads the scheduler should use. */
unsigned int threads_;
};
} // namespace pcl

Expand Down
34 changes: 29 additions & 5 deletions registration/include/pcl/registration/impl/gicp.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,28 @@

namespace pcl {

template <typename PointSource, typename PointTarget, typename Scalar>
void
GeneralizedIterativeClosestPoint<PointSource, PointTarget, Scalar>::setNumberOfThreads(
    unsigned int nr_threads)
{
#ifdef _OPENMP
  // A value of 0 means "choose automatically": use one thread per available
  // hardware processor as reported by OpenMP.
  threads_ = (nr_threads == 0) ? static_cast<unsigned int>(omp_get_num_procs())
                               : nr_threads;
  PCL_DEBUG("[pcl::GeneralizedIterativeClosestPoint::setNumberOfThreads] Setting "
            "number of threads to %u.\n",
            threads_);
#else
  // OpenMP is unavailable at compile time, so execution is single-threaded
  // regardless of the request; warn unless exactly one thread was asked for.
  threads_ = 1;
  if (nr_threads != 1)
    PCL_WARN("[pcl::GeneralizedIterativeClosestPoint::setNumberOfThreads] "
             "Parallelization is requested, but OpenMP is not available! Continuing "
             "without parallelization.\n");
#endif // _OPENMP
}

template <typename PointSource, typename PointTarget, typename Scalar>
template <typename PointT>
void
Expand All @@ -62,18 +84,19 @@ GeneralizedIterativeClosestPoint<PointSource, PointTarget, Scalar>::computeCovar
}

Eigen::Vector3d mean;
Eigen::Matrix3d cov;
pcl::Indices nn_indices(k_correspondences_);
std::vector<float> nn_dist_sq(k_correspondences_);

// We should never get there but who knows
if (cloud_covariances.size() < cloud->size())
cloud_covariances.resize(cloud->size());

auto matrices_iterator = cloud_covariances.begin();
for (auto points_iterator = cloud->begin(); points_iterator != cloud->end();
++points_iterator, ++matrices_iterator) {
const PointT& query_point = *points_iterator;
Eigen::Matrix3d& cov = *matrices_iterator;
#pragma omp parallel for default(none) num_threads(threads_) schedule(dynamic, 32) \
shared(cloud, cloud_covariances, kdtree) \
firstprivate(mean, cov, nn_indices, nn_dist_sq)
for (std::ptrdiff_t i = 0; i < static_cast<std::ptrdiff_t>(cloud->size()); ++i) {
const PointT& query_point = (*cloud)[i];
// Zero out the cov and mean
cov.setZero();
mean.setZero();
Expand Down Expand Up @@ -124,6 +147,7 @@ GeneralizedIterativeClosestPoint<PointSource, PointTarget, Scalar>::computeCovar
v = gicp_epsilon_;
cov += v * col * col.transpose();
}
cloud_covariances[i] = cov;
}
}

Expand Down

0 comments on commit 7e37440

Please sign in to comment.