Skip to content

Commit

Permalink
factor out uses of omp_get_num_threads() and omp_get_max_threads() outside of OpenMP wrapper (#6133)
Browse files Browse the repository at this point in the history
  • Loading branch information
jameslamb committed Oct 9, 2023
1 parent ad02551 commit 992f505
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 10 deletions.
11 changes: 3 additions & 8 deletions src/objective/rank_objective.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -289,17 +289,12 @@ class LambdarankNDCG : public RankingObjective {

void UpdatePositionBiasFactors(const score_t* lambdas, const score_t* hessians) const override {
/// get number of threads
int num_threads = 1;
#pragma omp parallel
#pragma omp master
{
num_threads = omp_get_num_threads();
}
int num_threads = OMP_NUM_THREADS();
// create per-thread buffers for first and second derivatives of utility w.r.t. position bias factors
std::vector<double> bias_first_derivatives(num_position_ids_ * num_threads, 0.0);
std::vector<double> bias_second_derivatives(num_position_ids_ * num_threads, 0.0);
std::vector<int> instance_counts(num_position_ids_ * num_threads, 0);
#pragma omp parallel for schedule(guided)
#pragma omp parallel for schedule(guided) num_threads(num_threads)
for (data_size_t i = 0; i < num_data_; i++) {
// get thread ID
const int tid = omp_get_thread_num();
Expand All @@ -310,7 +305,7 @@ class LambdarankNDCG : public RankingObjective {
bias_second_derivatives[offset] -= hessians[i];
instance_counts[offset]++;
}
#pragma omp parallel for schedule(guided)
#pragma omp parallel for schedule(guided) num_threads(num_threads)
for (data_size_t i = 0; i < num_position_ids_; i++) {
double bias_first_derivative = 0.0;
double bias_second_derivative = 0.0;
Expand Down
2 changes: 1 addition & 1 deletion src/treelearner/gpu_tree_learner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -344,7 +344,7 @@ void GPUTreeLearner::AllocateGPUMemory() {
// for data transfer time
auto start_time = std::chrono::steady_clock::now();
// Now generate new data structure feature4, and copy data to the device
int nthreads = std::min(omp_get_max_threads(), static_cast<int>(dense_feature_group_map_.size()) / dword_features_);
int nthreads = std::min(OMP_NUM_THREADS(), static_cast<int>(dense_feature_group_map_.size()) / dword_features_);
nthreads = std::max(nthreads, 1);
std::vector<Feature4*> host4_vecs(nthreads);
std::vector<boost::compute::buffer> host4_bufs(nthreads);
Expand Down
2 changes: 1 addition & 1 deletion src/treelearner/linear_tree_learner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ void LinearTreeLearner::InitLinear(const Dataset* train_data, const int max_leav
}
XTHX_by_thread_.clear();
XTg_by_thread_.clear();
int max_threads = omp_get_max_threads();
int max_threads = OMP_NUM_THREADS();
for (int i = 0; i < max_threads; ++i) {
XTHX_by_thread_.push_back(XTHX_);
XTg_by_thread_.push_back(XTg_);
Expand Down

0 comments on commit 992f505

Please sign in to comment.