[c++] Initial Work for Pairwise Ranking #6182

Status: Open. Wants to merge 88 commits into base: master.

Changes from 1 commit (88 commits total)
9ae3476
initial work for pairwise ranking (dataset part)
shiyu1994 Nov 8, 2023
2314099
remove unrelated changes
shiyu1994 Nov 8, 2023
06ddf68
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Nov 8, 2023
42e91e2
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Nov 23, 2023
a8379d4
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Dec 1, 2023
da5f02d
first version of pairwie ranking bin
shiyu1994 Dec 5, 2023
9d0afd9
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Dec 5, 2023
0cb436d
templates for bins in pairwise ranking dataset
shiyu1994 Dec 5, 2023
fc9b381
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Dec 5, 2023
6fbc674
fix lint issues and compilation errors
shiyu1994 Dec 6, 2023
6082913
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Dec 6, 2023
9e16dc3
add methods for pairwise bin
shiyu1994 Dec 6, 2023
6154bde
instantiate templates
shiyu1994 Dec 6, 2023
3a646eb
remove unrelated files
shiyu1994 Dec 6, 2023
9e77ab9
add return values for unimplemented methods
shiyu1994 Dec 7, 2023
eba4560
add new files and windows/LightGBM.vcxproj and windows/LightGBM.vcxpr…
shiyu1994 Dec 7, 2023
f1d2281
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Dec 7, 2023
873d7ad
create pairwise dataset
shiyu1994 Dec 7, 2023
3838b9b
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Dec 7, 2023
986a979
set num_data_ of pairwise dataset
shiyu1994 Dec 7, 2023
c40965a
skip query with no paired items
shiyu1994 Dec 15, 2023
97d34d7
store original query information
shiyu1994 Jan 31, 2024
1e57e27
copy position information for pairwise dataset
shiyu1994 Jan 31, 2024
1699c06
rename to pointwise members
shiyu1994 Feb 1, 2024
d5b6f0a
adding initial support for pairwise gradients and NDCG eval with pair…
metpavel Feb 9, 2024
2ee1199
fix score offsets
metpavel Feb 9, 2024
fe10a2c
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Feb 19, 2024
0aaf090
skip copy for weights and label if none
shiyu1994 Feb 19, 2024
8714bfb
fix pairwise dataset bugs
shiyu1994 Feb 29, 2024
250996b
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Feb 29, 2024
38b2f3e
fix validation set with pairwise lambda rank
shiyu1994 Feb 29, 2024
09fff25
Merge branch 'pairwise-ranking-dev' of https://github.com/Microsoft/L…
shiyu1994 Feb 29, 2024
ba3c815
fix pairwise ranking objective initialization
shiyu1994 Feb 29, 2024
d9b537d
keep the original query boundaries and add pairwise query boundaries
shiyu1994 Feb 29, 2024
362baf8
allow empty queries in pairwise query boundaries
shiyu1994 Mar 1, 2024
06597ac
fix query boundaries
shiyu1994 Mar 1, 2024
18e3a1b
clean up
shiyu1994 Mar 1, 2024
43b8582
various fixes
metpavel Mar 1, 2024
ad4e89f
construct all pairs for validation set
shiyu1994 Mar 1, 2024
dc17309
Merge branch 'pairwise-ranking-dev' of https://github.com/microsoft/L…
metpavel Mar 1, 2024
1ad78b2
fix for validation set
shiyu1994 Mar 1, 2024
9cd3b93
fix validation pairs
shiyu1994 Mar 1, 2024
f9d9c07
fatal error when no query boundary is provided
shiyu1994 Mar 1, 2024
97e0a81
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Mar 1, 2024
746bc82
add differential features
shiyu1994 Mar 8, 2024
f9ab075
add differential features
shiyu1994 Mar 20, 2024
7aa170b
bug fixing and efficiency improvement
metpavel Mar 25, 2024
abdb716
add feature group for differential features
shiyu1994 Mar 27, 2024
3cdfd83
refactor template initializations with macro
shiyu1994 Mar 28, 2024
3703495
tree learning with differential features
shiyu1994 Mar 28, 2024
8f55a93
avoid copy sampled values
shiyu1994 Mar 28, 2024
8c3e7be
fix sampled indices
shiyu1994 Apr 2, 2024
5aa2d17
push data into differential features
shiyu1994 Apr 11, 2024
1c319b8
fix differential feature bugs
shiyu1994 Apr 17, 2024
d8eb68b
clean up debug code
shiyu1994 Apr 17, 2024
b088236
fix validation set with differential features
shiyu1994 Apr 18, 2024
2d09897
support row-wise histogram construction with pairwise ranking
shiyu1994 Jun 15, 2024
406d0c1
fix row wise in pairwise ranking
shiyu1994 Jun 20, 2024
6c65d1f
save for debug
shiyu1994 Jun 20, 2024
7738915
update code for debug
shiyu1994 Jun 28, 2024
d6c16df
save changes
shiyu1994 Jul 4, 2024
0d572d7
save changes for debug
shiyu1994 Jul 8, 2024
1f59f85
save changes
shiyu1994 Aug 21, 2024
0618bb2
add bagging by query for lambdarank
shiyu1994 Aug 27, 2024
185bdf6
Merge branch 'master' into bagging/bagging-by-query-for-lambdarank
shiyu1994 Aug 27, 2024
38fa4c2
fix pre-commit
shiyu1994 Aug 27, 2024
2fce147
Merge branch 'bagging/bagging-by-query-for-lambdarank' of https://git…
shiyu1994 Aug 27, 2024
1f7f967
Merge branch 'master' into bagging/bagging-by-query-for-lambdarank
shiyu1994 Aug 29, 2024
9e2a322
fix bagging by query with cuda
shiyu1994 Aug 29, 2024
666c51e
fix bagging by query test case
shiyu1994 Aug 30, 2024
9e2c338
fix bagging by query test case
shiyu1994 Aug 30, 2024
3abbc11
fix bagging by query test case
shiyu1994 Aug 30, 2024
13fa0a3
add #include <vector>
shiyu1994 Aug 30, 2024
b8427b0
merge bagging by query
shiyu1994 Sep 4, 2024
0258f07
update CMakeLists.txt
shiyu1994 Sep 4, 2024
90a95fa
fix bagging by query with pairwise lambdarank
shiyu1994 Sep 20, 2024
306af04
Merge branch 'master' into pairwise-ranking-dev
shiyu1994 Sep 20, 2024
b69913d
fix compilation error C3200 with visual studio
shiyu1994 Oct 10, 2024
6dba1cf
clean up main.cpp
shiyu1994 Oct 11, 2024
3b2e29d
Exposing configuration parameters for pairwise ranking
metpavel Oct 18, 2024
f1c32d3
fix bugs and pass by reference for SigmoidCache&
shiyu1994 Nov 8, 2024
51693e2
add pairing approach
shiyu1994 Nov 8, 2024
5071842
add at_least_one_relevant
shiyu1994 Nov 8, 2024
598764b
fix num bin for row wise in pairwise ranking
shiyu1994 Nov 21, 2024
f7deab4
save for debug
shiyu1994 Dec 17, 2024
0d1b310
update doc
shiyu1994 Dec 18, 2024
8f9ab26
add random_k pairing mode
shiyu1994 Feb 18, 2025
d797122
clean up code
shiyu1994 Feb 18, 2025

save changes
shiyu1994 committed Aug 21, 2024
commit 1f59f8525baaf5bb8626490b407c96d72ebcc2f6
3 changes: 3 additions & 0 deletions include/LightGBM/feature_group.h
@@ -286,7 +286,10 @@ class FeatureGroup {
}

inline void CopySubrowByCol(const FeatureGroup* full_feature, const data_size_t* used_indices, data_size_t num_used_indices, int fidx) {
Log::Warning("in CopySubrowByCol");
if (!is_multi_val_) {
Log::Warning("is not multi val");
Log::Warning("full_feature->bin_data_.get() = %ld", full_feature->bin_data_.get());
bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices, num_used_indices);
} else {
multi_bin_data_[fidx]->CopySubrow(full_feature->multi_bin_data_[fidx].get(), used_indices, num_used_indices);
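For orientation: the non-multi-val branch above delegates to Bin::CopySubrow, which gathers the selected rows into a compact buffer. A minimal standalone sketch of that gather pattern, with hypothetical names (the real Bin classes carry far more state):

#include <cstdint>
#include <vector>

using data_size_t = int32_t;

// Hypothetical stand-in for the gather done by Bin::CopySubrow: the
// destination is indexed densely (0 .. num_used_indices - 1) while the
// source is indexed by the original row ids in used_indices.
template <typename VAL_T>
void GatherSubrow(const std::vector<VAL_T>& full_data,
                  const data_size_t* used_indices,
                  data_size_t num_used_indices,
                  std::vector<VAL_T>* out) {
  out->resize(num_used_indices);
  for (data_size_t i = 0; i < num_used_indices; ++i) {
    (*out)[i] = full_data[used_indices[i]];
  }
}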
5 changes: 5 additions & 0 deletions include/LightGBM/objective_function.h
@@ -108,6 +108,11 @@ class ObjectiveFunction {
virtual bool NeedConvertOutputCUDA () const { return false; }

#endif // USE_CUDA

virtual void SetDataIndices(const data_size_t* used_data_indices) const { used_data_indices_ = used_data_indices; }

private:
mutable const data_size_t* used_data_indices_ = nullptr;
};

void UpdatePointwiseScoresForOneQuery(data_size_t query_id, double* score_pointwise, const double* score_pairwise, data_size_t cnt_pointwise,
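The new SetDataIndices hook lets the booster hand the objective the bagged row indices before gradients are computed. A minimal sketch of the pattern this enables; the ToFullIndex accessor is hypothetical (in the diff the pointer is private, so a real subclass would need a protected getter like this one):

#include <cstdint>

using data_size_t = int32_t;

// Sketch of the pattern the hook enables: translate "position in the bag"
// to "row in the full dataset" when computing gradients under bagging.
class ObjectiveFunctionSketch {
 public:
  void SetDataIndices(const data_size_t* used_data_indices) const {
    used_data_indices_ = used_data_indices;
  }

 protected:
  data_size_t ToFullIndex(data_size_t bag_position) const {
    // nullptr means no bagging: bag position equals dataset row.
    return used_data_indices_ == nullptr ? bag_position
                                         : used_data_indices_[bag_position];
  }

 private:
  mutable const data_size_t* used_data_indices_ = nullptr;
};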
5 changes: 4 additions & 1 deletion src/boosting/bagging.hpp
@@ -60,6 +60,7 @@ class BaggingSampleStrategy : public SampleStrategy {
} else {
// get subset
tmp_subset_->ReSize(bag_data_cnt_);
Log::Warning("bag_data_indices_.size() = %ld, bag_data_cnt_ = %d", bag_data_indices_.size(), bag_data_cnt_);
tmp_subset_->CopySubrow(train_data_, bag_data_indices_.data(),
bag_data_cnt_, false);
#ifdef USE_CUDA
@@ -119,8 +120,10 @@ class BaggingSampleStrategy : public SampleStrategy {
(static_cast<double>(bag_data_cnt_) / num_data_) / config_->bagging_freq;
is_use_subset_ = false;
if (config_->device_type != std::string("cuda")) {
const int group_threshold_usesubset = 100;
const int group_threshold_usesubset = 200;
const double average_bag_rate_threshold = 0.5;
Log::Warning("train_data_->num_feature_groups() = %d", train_data_->num_feature_groups());
Log::Warning("average_bag_rate = %f", average_bag_rate);
if (average_bag_rate <= average_bag_rate_threshold
&& (train_data_->num_feature_groups() < group_threshold_usesubset)) {
if (tmp_subset_ == nullptr || is_change_dataset) {
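The hunk above raises group_threshold_usesubset from 100 to 200 and adds logging around the heuristic that decides between materializing a physical subset of the bagged rows and masking gradients in place. A standalone sketch of that decision, using the threshold values shown in the diff:

#include <cstdint>

using data_size_t = int32_t;

// Standalone sketch of the subset-vs-mask heuristic: copying a physical
// subset pays off only when the bag is small relative to the data and the
// number of feature groups (columns re-copied every bagging round) is
// modest. Threshold values match the diff above.
bool ShouldUseSubset(data_size_t bag_data_cnt, data_size_t num_data,
                     int bagging_freq, int num_feature_groups) {
  const int group_threshold_usesubset = 200;      // raised from 100 here
  const double average_bag_rate_threshold = 0.5;
  const double average_bag_rate =
      (static_cast<double>(bag_data_cnt) / num_data) / bagging_freq;
  return average_bag_rate <= average_bag_rate_threshold &&
         num_feature_groups < group_threshold_usesubset;
}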
3 changes: 2 additions & 1 deletion src/boosting/gbdt.cpp
@@ -339,6 +339,8 @@ bool GBDT::TrainOneIter(const score_t* gradients, const score_t* hessians) {
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
init_scores[cur_tree_id] = BoostFromAverage(cur_tree_id, true);
}
data_sample_strategy_->Bagging(iter_, tree_learner_.get(), gradients_.data(), hessians_.data());
objective_function_->SetDataIndices(data_sample_strategy_->bag_data_indices().data());
Boosting();
gradients = gradients_pointer_;
hessians = hessians_pointer_;
@@ -361,7 +363,6 @@ bool GBDT::TrainOneIter(const score_t* gradients, const score_t* hessians) {
}

// bagging logic
data_sample_strategy_->Bagging(iter_, tree_learner_.get(), gradients_.data(), hessians_.data());
const bool is_use_subset = data_sample_strategy_->is_use_subset();
const data_size_t bag_data_cnt = data_sample_strategy_->bag_data_cnt();
const std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>>& bag_data_indices = data_sample_strategy_->bag_data_indices();
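Taken together, the two gbdt.cpp hunks reorder TrainOneIter: Bagging() moves ahead of gradient computation, and the bag indices are passed to the objective. A hypothetical skeleton of the new flow, with the member calls reduced to stand-ins:

struct TrainOneIterFlow {  // hypothetical skeleton; bodies elided
  void Run() {
    Bagging();                  // 1. choose this iteration's bag first
    SetObjectiveDataIndices();  // 2. expose bag indices to the objective
    Boosting();                 // 3. compute gradients with the bag known
    TrainTree();                // 4. fit the tree on the bagged data
  }
  void Bagging() {}
  void SetObjectiveDataIndices() {}
  void Boosting() {}
  void TrainTree() {}
};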
19 changes: 13 additions & 6 deletions src/io/dataset.cpp
@@ -1140,6 +1140,7 @@ void Dataset::CopySubrow(const Dataset* fullset,
data_size_t num_used_indices, bool need_meta_data) {
CHECK_EQ(num_used_indices, num_data_);

Log::Warning("copy subrow here !!!!");
std::vector<int> group_ids, subfeature_ids;
group_ids.reserve(num_features_);
subfeature_ids.reserve(num_features_);
@@ -1155,20 +1156,24 @@ void Dataset::CopySubrow(const Dataset* fullset,
subfeature_ids.emplace_back(-1);
}
}
Log::Warning("copy subrow step 0 !!!!");
int num_copy_tasks = static_cast<int>(group_ids.size());

OMP_INIT_EX();
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(dynamic)
// OMP_INIT_EX();
// #pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(dynamic)
for (int task_id = 0; task_id < num_copy_tasks; ++task_id) {
OMP_LOOP_EX_BEGIN();
// OMP_LOOP_EX_BEGIN();
Log::Warning("before copy sub row by col 0");
int group = group_ids[task_id];
int subfeature = subfeature_ids[task_id];
Log::Warning("before copy sub row by col 1");
feature_groups_[group]->CopySubrowByCol(fullset->feature_groups_[group].get(),
used_indices, num_used_indices, subfeature);
OMP_LOOP_EX_END();
Log::Warning("after copy sub row by col");
// OMP_LOOP_EX_END();
}
OMP_THROW_EX();
// OMP_THROW_EX();

Log::Warning("copy subrow step 1 !!!!");
if (need_meta_data) {
metadata_.Init(fullset->metadata_, used_indices, num_used_indices);
}
@@ -1188,6 +1193,8 @@ void Dataset::CopySubrow(const Dataset* fullset,
device_type_ = fullset->device_type_;
gpu_device_id_ = fullset->gpu_device_id_;

Log::Warning("copy subrow step 2 !!!!");

#ifdef USE_CUDA
if (device_type_ == std::string("cuda")) {
if (cuda_column_data_ == nullptr) {
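The commented-out OMP_* macros normally wrap this loop in LightGBM's parallel-for-with-exception-capture pattern; the commit serializes it, presumably to get ordered debug output. A standalone analogue of the pattern that was disabled:

#include <exception>
#include <vector>

// Standalone analogue of OMP_INIT_EX / OMP_LOOP_EX_BEGIN / OMP_LOOP_EX_END:
// run loop bodies in parallel, capture the first exception thrown by any
// iteration, and rethrow it on the calling thread after the loop.
void ParallelCopy(std::vector<int>* tasks) {
  std::exception_ptr first_error = nullptr;
  #pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < static_cast<int>(tasks->size()); ++i) {
    try {
      (*tasks)[i] += 1;  // stand-in for CopySubrowByCol on one feature group
    } catch (...) {
      #pragma omp critical
      if (first_error == nullptr) first_error = std::current_exception();
    }
  }
  if (first_error) std::rethrow_exception(first_error);
}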
4 changes: 4 additions & 0 deletions src/io/dense_bin.hpp
@@ -566,8 +566,11 @@ class DenseBin : public Bin {

void CopySubrow(const Bin* full_bin, const data_size_t* used_indices,
data_size_t num_used_indices) override {
Log::Warning("is dense");
auto other_bin = dynamic_cast<const DenseBin<VAL_T, IS_4BIT>*>(full_bin);
Log::Warning("other bin created");
if (IS_4BIT) {
Log::Warning("is 4 bit");
const data_size_t rest = num_used_indices & 1;
for (int i = 0; i < num_used_indices - rest; i += 2) {
data_size_t idx = used_indices[i];
@@ -586,6 +589,7 @@ class DenseBin : public Bin {
}
} else {
for (int i = 0; i < num_used_indices; ++i) {
CHECK_LT(used_indices[i], data_.size());
data_[i] = other_bin->data_[used_indices[i]];
}
}
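The IS_4BIT branch exists because two bin values share one byte, which is why the copy loop walks indices in pairs. A minimal sketch of that nibble packing, independent of DenseBin's exact helpers:

#include <cstdint>
#include <vector>

// Minimal sketch of 4-bit packing: element i lives in byte i / 2, in the
// low nibble when i is even and the high nibble when i is odd. Walking
// indices two at a time lets each destination byte be written exactly once.
inline uint8_t Get4Bit(const std::vector<uint8_t>& data, int i) {
  const uint8_t byte = data[i >> 1];
  return (i & 1) ? (byte >> 4) : (byte & 0x0F);
}

inline void Set4Bit(std::vector<uint8_t>* data, int i, uint8_t val) {
  uint8_t& byte = (*data)[i >> 1];
  byte = (i & 1) ? static_cast<uint8_t>((byte & 0x0F) | (val << 4))
                 : static_cast<uint8_t>((byte & 0xF0) | (val & 0x0F));
}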
6 changes: 3 additions & 3 deletions src/io/multi_val_pairwise_lambdarank_bin.hpp
@@ -71,9 +71,9 @@ class MultiValDensePairwiseLambdarankBin: public MultiValPairwiseLambdarankBin<B
// if (bin != 0) {
// Log::Warning("first bin = %d, num_feature_ = %d", bin, this->num_feature_);
// }
if (j == 0) {
Log::Warning("group index = %d bin = %d gradient = %f hessian = %f", j, bin, gradient, hessian);
}
// if (j == 0) {
// Log::Warning("group index = %d bin = %d gradient = %f hessian = %f", j, bin, gradient, hessian);
// }

const auto ti = (bin + this->offsets_[j]) << 1;
grad[ti] += gradient;
13 changes: 7 additions & 6 deletions src/io/pairwise_lambdarank_bin.cpp
@@ -52,6 +52,7 @@ void PairwiseRankingBin<BIN_TYPE, ITERATOR_TYPE>::Push(int tid, data_size_t idx,

template <typename BIN_TYPE, template<typename> class ITERATOR_TYPE>
void PairwiseRankingBin<BIN_TYPE, ITERATOR_TYPE>::CopySubrow(const Bin* full_bin, const data_size_t* used_indices, data_size_t num_used_indices) {
Log::Warning("copy subrow in pairwie ranking bin");
unpaired_bin_->CopySubrow(full_bin, used_indices, num_used_indices);
}

@@ -98,9 +99,9 @@ void DensePairwiseRankingBin<VAL_T, IS_4BIT, ITERATOR_TYPE>::ConstructHistogramI
for (; i < pf_end; ++i) {
const auto paired_idx = USE_INDICES ? data_indices[i] : i;
const auto ti = GetBinAt(paired_idx) << 1;
if (this->group_index_ == 0) {
Log::Warning("group index = %d bin = %d gradient = %f hessian = %f", this->group_index_, ti / 2, ordered_gradients[i], ordered_hessians[i]);
}
// if (this->group_index_ == 0) {
// Log::Warning("group index = %d bin = %d gradient = %f hessian = %f", this->group_index_, ti / 2, ordered_gradients[i], ordered_hessians[i]);
// }
if (USE_HESSIAN) {
grad[ti] += ordered_gradients[i];
hess[ti] += ordered_hessians[i];
@@ -113,9 +114,9 @@ void DensePairwiseRankingBin<VAL_T, IS_4BIT, ITERATOR_TYPE>::ConstructHistogramI
for (; i < end; ++i) {
const auto paired_idx = USE_INDICES ? data_indices[i] : i;
const auto ti = GetBinAt(paired_idx) << 1;
if (this->group_index_ == 0) {
Log::Warning("group index = %d bin = %d gradient = %f hessian = %f", this->group_index_, ti / 2, ordered_gradients[i], ordered_hessians[i]);
}
// if (this->group_index_ == 0) {
// Log::Warning("group index = %d bin = %d gradient = %f hessian = %f", this->group_index_, ti / 2, ordered_gradients[i], ordered_hessians[i]);
// }
if (USE_HESSIAN) {
grad[ti] += ordered_gradients[i];
hess[ti] += ordered_hessians[i];
2 changes: 2 additions & 0 deletions src/io/sparse_bin.hpp
@@ -745,9 +745,11 @@ class SparseBin : public Bin {

void CopySubrow(const Bin* full_bin, const data_size_t* used_indices,
data_size_t num_used_indices) override {
Log::Warning("is sparse");
auto other_bin = dynamic_cast<const SparseBin<VAL_T>*>(full_bin);
deltas_.clear();
vals_.clear();
Log::Warning("is sparse");
data_size_t start = 0;
if (num_used_indices > 0) {
start = used_indices[0];
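SparseBin stores only nonzero rows, each as a gap from the previous stored row plus the bin value, which is why CopySubrow must rebuild deltas_ and vals_ rather than gather in place. A simplified sketch of that encoding (the real class caps deltas at 8 bits and inserts padding entries for larger gaps; that detail is omitted):

#include <cstdint>
#include <vector>

using data_size_t = int32_t;

// Simplified sketch of a delta-encoded sparse bin: only nonzero rows are
// stored, each as the gap to the previous stored row plus its bin value.
struct SparseBinSketch {
  std::vector<data_size_t> deltas;  // gap from the previous stored row
  std::vector<uint8_t> vals;        // bin value at that row

  void Push(data_size_t row, uint8_t val, data_size_t* last_row) {
    deltas.push_back(row - *last_row);
    vals.push_back(val);
    *last_row = row;
  }
};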
7 changes: 5 additions & 2 deletions src/io/train_share_states.cpp
@@ -374,6 +374,7 @@ void TrainingShareStates::CalcBinOffsets(const std::vector<std::unique_ptr<Featu
offsets->clear();
feature_hist_offsets_.clear();
if (in_is_col_wise) {
// Log::Fatal("not supported 0");
uint32_t cur_num_bin = 0;
uint32_t hist_cur_num_bin = 0;
for (int group = 0; group < static_cast<int>(feature_groups.size()); ++group) {
@@ -438,9 +439,10 @@ void TrainingShareStates::CalcBinOffsets(const std::vector<std::unique_ptr<Featu
}
}
sum_dense_ratio /= ncol;
const bool is_sparse_row_wise = (1.0f - sum_dense_ratio) >=
MultiValBin::multi_val_bin_sparse_threshold ? 1 : 0;
const bool is_sparse_row_wise = false; //(1.0f - sum_dense_ratio) >=
// MultiValBin::multi_val_bin_sparse_threshold ? 1 : 0;
if (is_sparse_row_wise) {
// Log::Fatal("not supported 1");
int cur_num_bin = 1;
uint32_t hist_cur_num_bin = 1;
for (int group = 0; group < static_cast<int>(feature_groups.size()); ++group) {
@@ -474,6 +476,7 @@ void TrainingShareStates::CalcBinOffsets(const std::vector<std::unique_ptr<Featu
for (int group = 0; group < static_cast<int>(feature_groups.size()); ++group) {
const std::unique_ptr<FeatureGroup>& feature_group = feature_groups[group];
if (feature_group->is_multi_val_) {
Log::Fatal("not supported 2");
for (int i = 0; i < feature_group->num_feature_; ++i) {
const std::unique_ptr<BinMapper>& bin_mapper = feature_group->bin_mappers_[i];
if (group == 0 && i == 0 && bin_mapper->GetMostFreqBin() > 0) {
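The hard-coded is_sparse_row_wise = false above disables the sparsity test that would normally pick the sparse multi-val bin for row-wise histograms. A standalone sketch of the disabled test; the threshold constant is an assumed stand-in for MultiValBin::multi_val_bin_sparse_threshold:

// Standalone sketch of the disabled test: pick the sparse multi-val bin
// when the average fraction of zero entries across columns exceeds a
// threshold. The constant here is an assumed value; LightGBM reads it
// from MultiValBin::multi_val_bin_sparse_threshold.
bool IsSparseRowWise(double sum_dense_ratio, int ncol) {
  const double sparse_threshold = 0.25;  // assumed value
  const double avg_dense_ratio = sum_dense_ratio / ncol;
  return (1.0 - avg_dense_ratio) >= sparse_threshold;
}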
4 changes: 2 additions & 2 deletions src/treelearner/serial_tree_learner.cpp
@@ -757,7 +757,7 @@ void SerialTreeLearner::SplitInner(Tree* tree, int best_leaf, int* left_leaf,
int* right_leaf, bool update_cnt) {
Common::FunctionTimer fun_timer("SerialTreeLearner::SplitInner", global_timer);

histogram_pool_.DumpContent();
// histogram_pool_.DumpContent();

SplitInfo& best_split_info = best_split_per_leaf_[best_leaf];
const int inner_feature_index =
@@ -900,7 +900,7 @@ void SerialTreeLearner::SplitInner(Tree* tree, int best_leaf, int* left_leaf,
}

// #ifdef DEBUG
CheckSplit(best_split_info, *left_leaf, *right_leaf);
// CheckSplit(best_split_info, *left_leaf, *right_leaf);
// #endif

auto leaves_need_update = constraints_->Update(