Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[c++] Initial Work for Pairwise Ranking #6182

Open
wants to merge 88 commits into
base: master
Choose a base branch
from
Open
Changes from 11 commits
Commits
Show all changes
88 commits
Select commit Hold shift + click to select a range
9ae3476
initial work for pairwise ranking (dataset part)
shiyu1994 Nov 8, 2023
2314099
remove unrelated changes
shiyu1994 Nov 8, 2023
06ddf68
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Nov 8, 2023
42e91e2
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Nov 23, 2023
a8379d4
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Dec 1, 2023
da5f02d
first version of pairwie ranking bin
shiyu1994 Dec 5, 2023
9d0afd9
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Dec 5, 2023
0cb436d
templates for bins in pairwise ranking dataset
shiyu1994 Dec 5, 2023
fc9b381
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Dec 5, 2023
6fbc674
fix lint issues and compilation errors
shiyu1994 Dec 6, 2023
6082913
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Dec 6, 2023
9e16dc3
add methods for pairwise bin
shiyu1994 Dec 6, 2023
6154bde
instantiate templates
shiyu1994 Dec 6, 2023
3a646eb
remove unrelated files
shiyu1994 Dec 6, 2023
9e77ab9
add return values for unimplemented methods
shiyu1994 Dec 7, 2023
eba4560
add new files and windows/LightGBM.vcxproj and windows/LightGBM.vcxpr…
shiyu1994 Dec 7, 2023
f1d2281
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Dec 7, 2023
873d7ad
create pairwise dataset
shiyu1994 Dec 7, 2023
3838b9b
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Dec 7, 2023
986a979
set num_data_ of pairwise dataset
shiyu1994 Dec 7, 2023
c40965a
skip query with no paired items
shiyu1994 Dec 15, 2023
97d34d7
store original query information
shiyu1994 Jan 31, 2024
1e57e27
copy position information for pairwise dataset
shiyu1994 Jan 31, 2024
1699c06
rename to pointwise members
shiyu1994 Feb 1, 2024
d5b6f0a
adding initial support for pairwise gradients and NDCG eval with pair…
metpavel Feb 9, 2024
2ee1199
fix score offsets
metpavel Feb 9, 2024
fe10a2c
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Feb 19, 2024
0aaf090
skip copy for weights and label if none
shiyu1994 Feb 19, 2024
8714bfb
fix pairwise dataset bugs
shiyu1994 Feb 29, 2024
250996b
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Feb 29, 2024
38b2f3e
fix validation set with pairwise lambda rank
shiyu1994 Feb 29, 2024
09fff25
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Feb 29, 2024
ba3c815
fix pairwise ranking objective initialization
shiyu1994 Feb 29, 2024
d9b537d
keep the original query boundaries and add pairwise query boundaries
shiyu1994 Feb 29, 2024
362baf8
allow empty queries in pairwise query boundaries
shiyu1994 Mar 1, 2024
06597ac
fix query boundaries
shiyu1994 Mar 1, 2024
18e3a1b
clean up
shiyu1994 Mar 1, 2024
43b8582
various fixes
metpavel Mar 1, 2024
ad4e89f
construct all pairs for validation set
shiyu1994 Mar 1, 2024
dc17309
Merge branch 'pairwise-ranking-dev' of https://github.com/microsoft/L…
metpavel Mar 1, 2024
1ad78b2
fix for validation set
shiyu1994 Mar 1, 2024
9cd3b93
fix validation pairs
shiyu1994 Mar 1, 2024
f9d9c07
fatal error when no query boundary is provided
shiyu1994 Mar 1, 2024
97e0a81
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Mar 1, 2024
746bc82
add differential features
shiyu1994 Mar 8, 2024
f9ab075
add differential features
shiyu1994 Mar 20, 2024
7aa170b
bug fixing and efficiency improvement
metpavel Mar 25, 2024
abdb716
add feature group for differential features
shiyu1994 Mar 27, 2024
3cdfd83
refactor template initializations with macro
shiyu1994 Mar 28, 2024
3703495
tree learning with differential features
shiyu1994 Mar 28, 2024
8f55a93
avoid copy sampled values
shiyu1994 Mar 28, 2024
8c3e7be
fix sampled indices
shiyu1994 Apr 2, 2024
5aa2d17
push data into differential features
shiyu1994 Apr 11, 2024
1c319b8
fix differential feature bugs
shiyu1994 Apr 17, 2024
d8eb68b
clean up debug code
shiyu1994 Apr 17, 2024
b088236
fix validation set with differential features
shiyu1994 Apr 18, 2024
2d09897
support row-wise histogram construction with pairwise ranking
shiyu1994 Jun 15, 2024
406d0c1
fix row wise in pairwise ranking
shiyu1994 Jun 20, 2024
6c65d1f
save for debug
shiyu1994 Jun 20, 2024
7738915
update code for debug
shiyu1994 Jun 28, 2024
d6c16df
save changes
shiyu1994 Jul 4, 2024
0d572d7
save changes for debug
shiyu1994 Jul 8, 2024
1f59f85
save changes
shiyu1994 Aug 21, 2024
0618bb2
add bagging by query for lambdarank
shiyu1994 Aug 27, 2024
185bdf6
Merge branch 'master' into bagging/bagging-by-query-for-lambdarank
shiyu1994 Aug 27, 2024
38fa4c2
fix pre-commit
shiyu1994 Aug 27, 2024
2fce147
Merge branch 'bagging/bagging-by-query-for-lambdarank' of https://git…
shiyu1994 Aug 27, 2024
1f7f967
Merge branch 'master' into bagging/bagging-by-query-for-lambdarank
shiyu1994 Aug 29, 2024
9e2a322
fix bagging by query with cuda
shiyu1994 Aug 29, 2024
666c51e
fix bagging by query test case
shiyu1994 Aug 30, 2024
9e2c338
fix bagging by query test case
shiyu1994 Aug 30, 2024
3abbc11
fix bagging by query test case
shiyu1994 Aug 30, 2024
13fa0a3
add #include <vector>
shiyu1994 Aug 30, 2024
b8427b0
merge bagging by query
shiyu1994 Sep 4, 2024
0258f07
update CMakeLists.txt
shiyu1994 Sep 4, 2024
90a95fa
fix bagging by query with pairwise lambdarank
shiyu1994 Sep 20, 2024
306af04
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Sep 20, 2024
b69913d
fix compilation error C3200 with visual studio
shiyu1994 Oct 10, 2024
6dba1cf
clean up main.cpp
shiyu1994 Oct 11, 2024
3b2e29d
Exposing configuration parameters for pairwise ranking
metpavel Oct 18, 2024
f1c32d3
fix bugs and pass by reference for SigmoidCache&
shiyu1994 Nov 8, 2024
51693e2
add pairing approach
shiyu1994 Nov 8, 2024
5071842
add at_least_one_relevant
shiyu1994 Nov 8, 2024
598764b
fix num bin for row wise in pairwise ranking
shiyu1994 Nov 21, 2024
f7deab4
save for debug
shiyu1994 Dec 17, 2024
0d1b310
update doc
shiyu1994 Dec 18, 2024
8f9ab26
add random_k pairing mode
shiyu1994 Feb 18, 2025
d797122
clean up code
shiyu1994 Feb 18, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions docs/Parameters.rst
Original file line number Diff line number Diff line change
@@ -368,6 +368,10 @@ Learning Control Parameters

- random seed for bagging

- ``bagging_by_query`` :raw-html:`<a id="bagging_by_query" title="Permalink to this parameter" href="#bagging_by_query">&#x1F517;&#xFE0E;</a>`, default = ``false``, type = bool

- whether to do bagging sample by query

- ``feature_fraction`` :raw-html:`<a id="feature_fraction" title="Permalink to this parameter" href="#feature_fraction">&#x1F517;&#xFE0E;</a>`, default = ``1.0``, type = double, aliases: ``sub_feature``, ``colsample_bytree``, constraints: ``0.0 < feature_fraction <= 1.0``

- LightGBM will randomly select a subset of features on each iteration (tree) if ``feature_fraction`` is smaller than ``1.0``. For example, if you set it to ``0.8``, LightGBM will select 80% of features before training each tree
3 changes: 3 additions & 0 deletions include/LightGBM/config.h
Original file line number Diff line number Diff line change
@@ -364,6 +364,9 @@ struct Config {
// desc = random seed for bagging
int bagging_seed = 3;

// desc = whether to do bagging sample by query
bool bagging_by_query = false;

// alias = sub_feature, colsample_bytree
// check = >0.0
// check = <=1.0
5 changes: 5 additions & 0 deletions include/LightGBM/cuda/cuda_objective_function.hpp
Original file line number Diff line number Diff line change
@@ -49,6 +49,11 @@ class CUDAObjectiveInterface: public HOST_OBJECTIVE {
SynchronizeCUDADevice(__FILE__, __LINE__);
}

void GetGradients(const double* scores, const data_size_t /*num_sampled_queries*/, const data_size_t* /*sampled_query_indices*/, score_t* gradients, score_t* hessians) const override {
LaunchGetGradientsKernel(scores, gradients, hessians);
SynchronizeCUDADevice(__FILE__, __LINE__);
}

void RenewTreeOutputCUDA(const double* score, const data_size_t* data_indices_in_leaf, const data_size_t* num_data_in_leaf,
const data_size_t* data_start_in_leaf, const int num_leaves, double* leaf_value) const override {
global_timer.Start("CUDAObjectiveInterface::LaunchRenewTreeOutputCUDAKernel");
11 changes: 11 additions & 0 deletions include/LightGBM/objective_function.h
Original file line number Diff line number Diff line change
@@ -37,6 +37,17 @@ class ObjectiveFunction {
virtual void GetGradients(const double* score,
score_t* gradients, score_t* hessians) const = 0;

/*!
* \brief Calculate first order derivative of loss function, used only for bagging by query in lambdarank
* \param score prediction score in this round
* \param num_sampled_queries number of in-bag queries
* \param sampled_query_indices indices of in-bag queries
* \param gradients Output gradients
* \param hessians Output hessians
*/
virtual void GetGradients(const double* score, const data_size_t /*num_sampled_queries*/, const data_size_t* /*sampled_query_indices*/,
score_t* gradients, score_t* hessians) const { GetGradients(score, gradients, hessians); }

virtual const char* GetName() const = 0;

virtual bool IsConstantHessian() const { return false; }
4 changes: 4 additions & 0 deletions include/LightGBM/sample_strategy.h
Original file line number Diff line number Diff line change
@@ -55,6 +55,10 @@ class SampleStrategy {

bool NeedResizeGradients() const { return need_resize_gradients_; }

virtual data_size_t num_sampled_queries() const { return 0; }

virtual const data_size_t* sampled_query_indices() const { return nullptr; }

protected:
const Config* config_;
const Dataset* train_data_;
95 changes: 91 additions & 4 deletions src/boosting/bagging.hpp
Original file line number Diff line number Diff line change
@@ -7,6 +7,7 @@
#define LIGHTGBM_BOOSTING_BAGGING_HPP_

#include <string>
#include <vector>

namespace LightGBM {

@@ -17,8 +18,11 @@ class BaggingSampleStrategy : public SampleStrategy {
config_ = config;
train_data_ = train_data;
num_data_ = train_data->num_data();
num_queries_ = train_data->metadata().num_queries();
query_boundaries_ = train_data->metadata().query_boundaries();
objective_function_ = objective_function;
num_tree_per_iteration_ = num_tree_per_iteration;
num_threads_ = OMP_NUM_THREADS();
}

~BaggingSampleStrategy() {}
@@ -27,9 +31,10 @@ class BaggingSampleStrategy : public SampleStrategy {
Common::FunctionTimer fun_timer("GBDT::Bagging", global_timer);
// if need bagging
if ((bag_data_cnt_ < num_data_ && iter % config_->bagging_freq == 0) ||
need_re_bagging_) {
need_re_bagging_) {
need_re_bagging_ = false;
auto left_cnt = bagging_runner_.Run<true>(
if (!config_->bagging_by_query) {
auto left_cnt = bagging_runner_.Run<true>(
num_data_,
[=](int, data_size_t cur_start, data_size_t cur_cnt, data_size_t* left,
data_size_t*) {
@@ -43,7 +48,60 @@ class BaggingSampleStrategy : public SampleStrategy {
return cur_left_count;
},
bag_data_indices_.data());
bag_data_cnt_ = left_cnt;
bag_data_cnt_ = left_cnt;
} else {
num_sampled_queries_ = bagging_runner_.Run<true>(
num_queries_,
[=](int, data_size_t cur_start, data_size_t cur_cnt, data_size_t* left,
data_size_t*) {
data_size_t cur_left_count = 0;
cur_left_count = BaggingHelper(cur_start, cur_cnt, left);
return cur_left_count;
}, bag_query_indices_.data());

sampled_query_boundaries_[0] = 0;
OMP_INIT_EX();
#pragma omp parallel for schedule(static) num_threads(num_threads_)
for (data_size_t i = 0; i < num_sampled_queries_; ++i) {
OMP_LOOP_EX_BEGIN();
sampled_query_boundaries_[i + 1] = query_boundaries_[bag_query_indices_[i] + 1] - query_boundaries_[bag_query_indices_[i]];
OMP_LOOP_EX_END();
}
OMP_THROW_EX();

const int num_blocks = Threading::For<data_size_t>(0, num_sampled_queries_ + 1, 128, [this](int thread_index, data_size_t start_index, data_size_t end_index) {
for (data_size_t i = start_index + 1; i < end_index; ++i) {
sampled_query_boundaries_[i] += sampled_query_boundaries_[i - 1];
}
sampled_query_boundaires_thread_buffer_[thread_index] = sampled_query_boundaries_[end_index - 1];
});

for (int thread_index = 1; thread_index < num_blocks; ++thread_index) {
sampled_query_boundaires_thread_buffer_[thread_index] += sampled_query_boundaires_thread_buffer_[thread_index - 1];
}

Threading::For<data_size_t>(0, num_sampled_queries_ + 1, 128, [this](int thread_index, data_size_t start_index, data_size_t end_index) {
if (thread_index > 0) {
for (data_size_t i = start_index; i < end_index; ++i) {
sampled_query_boundaries_[i] += sampled_query_boundaires_thread_buffer_[thread_index - 1];
}
}
});

bag_data_cnt_ = sampled_query_boundaries_[num_sampled_queries_];

Threading::For<data_size_t>(0, num_sampled_queries_, 1, [this](int /*thread_index*/, data_size_t start_index, data_size_t end_index) {
for (data_size_t sampled_query_id = start_index; sampled_query_id < end_index; ++sampled_query_id) {
const data_size_t query_index = bag_query_indices_[sampled_query_id];
const data_size_t data_index_start = query_boundaries_[query_index];
const data_size_t data_index_end = query_boundaries_[query_index + 1];
const data_size_t sampled_query_start = sampled_query_boundaries_[sampled_query_id];
for (data_size_t i = data_index_start; i < data_index_end; ++i) {
bag_data_indices_[sampled_query_start + i - data_index_start] = i;
}
}
});
}
Log::Debug("Re-bagging, using %d data to train", bag_data_cnt_);
// set bagging data to tree learner
if (!is_use_subset_) {
@@ -109,7 +167,14 @@ class BaggingSampleStrategy : public SampleStrategy {
cuda_bag_data_indices_.Resize(num_data_);
}
#endif // USE_CUDA
bagging_runner_.ReSize(num_data_);
if (!config_->bagging_by_query) {
bagging_runner_.ReSize(num_data_);
} else {
bagging_runner_.ReSize(num_queries_);
sampled_query_boundaries_.resize(num_queries_ + 1, 0);
sampled_query_boundaires_thread_buffer_.resize(num_threads_, 0);
bag_query_indices_.resize(num_data_);
}
bagging_rands_.clear();
for (int i = 0;
i < (num_data_ + bagging_rand_block_ - 1) / bagging_rand_block_; ++i) {
@@ -156,6 +221,14 @@ class BaggingSampleStrategy : public SampleStrategy {
return false;
}

data_size_t num_sampled_queries() const override {
return num_sampled_queries_;
}

const data_size_t* sampled_query_indices() const override {
return bag_query_indices_.data();
}

private:
data_size_t BaggingHelper(data_size_t start, data_size_t cnt, data_size_t* buffer) {
if (cnt <= 0) {
@@ -205,6 +278,20 @@ class BaggingSampleStrategy : public SampleStrategy {

/*! \brief whether need restart bagging in continued training */
bool need_re_bagging_;
/*! \brief number of threads */
int num_threads_;
/*! \brief query boundaries of the in-bag queries */
std::vector<data_size_t> sampled_query_boundaries_;
/*! \brief buffer for calculating sampled_query_boundaries_ */
std::vector<data_size_t> sampled_query_boundaires_thread_buffer_;
/*! \brief in-bag query indices */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_query_indices_;
/*! \brief number of queries in the training dataset */
data_size_t num_queries_;
/*! \brief number of in-bag queries */
data_size_t num_sampled_queries_;
/*! \brief query boundaries of the whole training dataset */
const data_size_t* query_boundaries_;
};

} // namespace LightGBM
13 changes: 11 additions & 2 deletions src/boosting/gbdt.cpp
Original file line number Diff line number Diff line change
@@ -224,8 +224,14 @@ void GBDT::Boosting() {
}
// objective function will calculate gradients and hessians
int64_t num_score = 0;
objective_function_->
GetGradients(GetTrainingScore(&num_score), gradients_pointer_, hessians_pointer_);
if (config_->bagging_by_query) {
data_sample_strategy_->Bagging(iter_, tree_learner_.get(), gradients_.data(), hessians_.data());
objective_function_->
GetGradients(GetTrainingScore(&num_score), data_sample_strategy_->num_sampled_queries(), data_sample_strategy_->sampled_query_indices(), gradients_pointer_, hessians_pointer_);
} else {
objective_function_->
GetGradients(GetTrainingScore(&num_score), gradients_pointer_, hessians_pointer_);
}
}

void GBDT::Train(int snapshot_freq, const std::string& model_output_path) {
@@ -368,6 +374,9 @@ bool GBDT::TrainOneIter(const score_t* gradients, const score_t* hessians) {
}

// bagging logic
if (!config_->bagging_by_query) {
data_sample_strategy_->Bagging(iter_, tree_learner_.get(), gradients_.data(), hessians_.data());
}
const bool is_use_subset = data_sample_strategy_->is_use_subset();
const data_size_t bag_data_cnt = data_sample_strategy_->bag_data_cnt();
const std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>>& bag_data_indices = data_sample_strategy_->bag_data_indices();
5 changes: 5 additions & 0 deletions src/io/config.cpp
Original file line number Diff line number Diff line change
@@ -466,6 +466,11 @@ void Config::CheckParamConflict(const std::unordered_map<std::string, std::strin
Log::Warning("Found boosting=goss. For backwards compatibility reasons, LightGBM interprets this as boosting=gbdt, data_sample_strategy=goss."
"To suppress this warning, set data_sample_strategy=goss instead.");
}

if (bagging_by_query && data_sample_strategy != std::string("bagging")) {
Log::Warning("bagging_by_query=true is only compatible with data_sample_strategy=bagging. Setting bagging_by_query=false.");
bagging_by_query = false;
}
}

std::string Config::ToString() const {
6 changes: 6 additions & 0 deletions src/io/config_auto.cpp
Original file line number Diff line number Diff line change
@@ -208,6 +208,7 @@ const std::unordered_set<std::string>& Config::parameter_set() {
"neg_bagging_fraction",
"bagging_freq",
"bagging_seed",
"bagging_by_query",
"feature_fraction",
"feature_fraction_bynode",
"feature_fraction_seed",
@@ -378,6 +379,8 @@ void Config::GetMembersFromString(const std::unordered_map<std::string, std::str

GetInt(params, "bagging_seed", &bagging_seed);

GetBool(params, "bagging_by_query", &bagging_by_query);

GetDouble(params, "feature_fraction", &feature_fraction);
CHECK_GT(feature_fraction, 0.0);
CHECK_LE(feature_fraction, 1.0);
@@ -691,6 +694,7 @@ std::string Config::SaveMembersToString() const {
str_buf << "[neg_bagging_fraction: " << neg_bagging_fraction << "]\n";
str_buf << "[bagging_freq: " << bagging_freq << "]\n";
str_buf << "[bagging_seed: " << bagging_seed << "]\n";
str_buf << "[bagging_by_query: " << bagging_by_query << "]\n";
str_buf << "[feature_fraction: " << feature_fraction << "]\n";
str_buf << "[feature_fraction_bynode: " << feature_fraction_bynode << "]\n";
str_buf << "[feature_fraction_seed: " << feature_fraction_seed << "]\n";
@@ -817,6 +821,7 @@ const std::unordered_map<std::string, std::vector<std::string>>& Config::paramet
{"neg_bagging_fraction", {"neg_sub_row", "neg_subsample", "neg_bagging"}},
{"bagging_freq", {"subsample_freq"}},
{"bagging_seed", {"bagging_fraction_seed"}},
{"bagging_by_query", {}},
{"feature_fraction", {"sub_feature", "colsample_bytree"}},
{"feature_fraction_bynode", {"sub_feature_bynode", "colsample_bynode"}},
{"feature_fraction_seed", {}},
@@ -962,6 +967,7 @@ const std::unordered_map<std::string, std::string>& Config::ParameterTypes() {
{"neg_bagging_fraction", "double"},
{"bagging_freq", "int"},
{"bagging_seed", "int"},
{"bagging_by_query", "bool"},
{"feature_fraction", "double"},
{"feature_fraction_bynode", "double"},
{"feature_fraction_seed", "int"},
18 changes: 12 additions & 6 deletions src/objective/rank_objective.hpp
Original file line number Diff line number Diff line change
@@ -160,19 +160,21 @@ class RankingObjective : public ObjectiveFunction {
pos_biases_.resize(num_position_ids_, 0.0);
}

void GetGradients(const double* score, score_t* gradients,
score_t* hessians) const override {
void GetGradients(const double* score, const data_size_t num_sampled_queries, const data_size_t* sampled_query_indices,
score_t* gradients, score_t* hessians) const override {
const data_size_t num_queries = (sampled_query_indices == nullptr ? num_queries_ : num_sampled_queries);
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(guided)
for (data_size_t i = 0; i < num_queries_; ++i) {
const data_size_t start = query_boundaries_[i];
const data_size_t cnt = query_boundaries_[i + 1] - query_boundaries_[i];
for (data_size_t i = 0; i < num_queries; ++i) {
const data_size_t query_index = (sampled_query_indices == nullptr ? i : sampled_query_indices[i]);
const data_size_t start = query_boundaries_[query_index];
const data_size_t cnt = query_boundaries_[query_index + 1] - query_boundaries_[query_index];
std::vector<double> score_adjusted;
if (num_position_ids_ > 0) {
for (data_size_t j = 0; j < cnt; ++j) {
score_adjusted.push_back(score[start + j] + pos_biases_[positions_[start + j]]);
}
}
GetGradientsForOneQuery(i, cnt, label_ + start, num_position_ids_ > 0 ? score_adjusted.data() : score + start,
GetGradientsForOneQuery(query_index, cnt, label_ + start, num_position_ids_ > 0 ? score_adjusted.data() : score + start,
gradients + start, hessians + start);
if (weights_ != nullptr) {
for (data_size_t j = 0; j < cnt; ++j) {
@@ -188,6 +190,10 @@ class RankingObjective : public ObjectiveFunction {
}
}

void GetGradients(const double* score, score_t* gradients, score_t* hessians) const override {
GetGradients(score, num_queries_, nullptr, gradients, hessians);
}

virtual void GetGradientsForOneQuery(data_size_t query_id, data_size_t cnt,
const label_t* label,
const double* score, score_t* lambdas,
24 changes: 24 additions & 0 deletions tests/python_package_test/test_engine.py
Original file line number Diff line number Diff line change
@@ -4399,3 +4399,27 @@ def test_quantized_training():
quant_bst = lgb.train(bst_params, ds, num_boost_round=10)
quant_rmse = np.sqrt(np.mean((quant_bst.predict(X) - y) ** 2))
assert quant_rmse < rmse + 6.0


def test_bagging_by_query_in_lambdarank():
    """Check that bagging (by query or by row) does not badly degrade lambdarank NDCG.

    Trains three LambdaRank models on the example ranking dataset:
      1. a baseline with no bagging,
      2. one with ``bagging_by_query=True`` (whole queries sampled in/out of bag),
      3. one with plain row-wise bagging,
    all at ``bagging_fraction=0.1``, and asserts both bagged models stay within
    0.1 NDCG@5 of the baseline on the validation set.
    """
    rank_example_dir = Path(__file__).absolute().parents[2] / "examples" / "lambdarank"
    X_train, y_train = load_svmlight_file(str(rank_example_dir / "rank.train"))
    q_train = np.loadtxt(str(rank_example_dir / "rank.train.query"))
    X_test, y_test = load_svmlight_file(str(rank_example_dir / "rank.test"))
    q_test = np.loadtxt(str(rank_example_dir / "rank.test.query"))
    params = {"objective": "lambdarank", "verbose": -1, "metric": "ndcg", "ndcg_eval_at": "5"}
    lgb_train = lgb.Dataset(X_train, y_train, group=q_train, params=params)
    lgb_test = lgb.Dataset(X_test, y_test, group=q_test, params=params)

    # Baseline: no bagging at all.
    gbm = lgb.train(params, lgb_train, num_boost_round=50, valid_sets=[lgb_test])
    ndcg_score = gbm.best_score["valid_0"]["ndcg@5"]

    # Query-level bagging: sample whole queries each iteration.
    params.update({"bagging_by_query": True, "bagging_fraction": 0.1, "bagging_freq": 1})
    gbm_bagging_by_query = lgb.train(params, lgb_train, num_boost_round=50, valid_sets=[lgb_test])
    ndcg_score_bagging_by_query = gbm_bagging_by_query.best_score["valid_0"]["ndcg@5"]

    # Row-level bagging at the same fraction, for comparison.
    params.update({"bagging_by_query": False, "bagging_fraction": 0.1, "bagging_freq": 1})
    gbm_no_bagging_by_query = lgb.train(params, lgb_train, num_boost_round=50, valid_sets=[lgb_test])
    ndcg_score_no_bagging_by_query = gbm_no_bagging_by_query.best_score["valid_0"]["ndcg@5"]

    # Removed leftover debug print of the three scores; pytest reports the
    # asserted values on failure anyway.
    assert ndcg_score_bagging_by_query >= ndcg_score - 0.1
    assert ndcg_score_no_bagging_by_query >= ndcg_score - 0.1