Fix bugs in RF (#1906)
* fix RF's bugs

* fix tests

* rollback num_iterations

* fix a bug and reduce memory costs

* reduce memory cost
guolinke committed Dec 17, 2018
1 parent 0c5f390 commit cba8244
Showing 11 changed files with 214 additions and 69 deletions.
7 changes: 7 additions & 0 deletions include/LightGBM/objective_function.h
@@ -42,6 +42,13 @@ class ObjectiveFunction {
const data_size_t*,
data_size_t) const { return ori_output; }

virtual double RenewTreeOutput(double ori_output, double,
const data_size_t*,
const data_size_t*,
data_size_t) const {
return ori_output;
}

virtual double BoostFromScore(int /*class_id*/) const { return 0.0; }

virtual bool ClassNeedTrain(int /*class_id*/) const { return true; }
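The new overload mirrors the existing per-row version but takes a single constant prediction, which is what RF needs: before a class's tree is trained, every row carries the same init score. Below is a standalone sketch (not LightGBM code) of the kind of leaf renewal an objective could perform against such a constant prediction; the parameter names, the label array, and the median rule are illustrative assumptions.

#include <algorithm>
#include <cstdint>
#include <vector>

typedef int32_t data_size_t;  // same width as LightGBM's data_size_t

// Standalone sketch: re-fit a leaf's output as the median residual of the
// rows it contains, measured against a constant prediction `pred`. The two
// index arrays mirror the overload's shape (leaf-local index -> bagging
// index -> dataset row); their exact meaning here is an assumption.
double RenewLeafOutputSketch(double ori_output, double pred,
                             const float* labels,
                             const data_size_t* index_mapper,
                             const data_size_t* bag_mapper,
                             data_size_t num_data_in_leaf) {
  if (num_data_in_leaf <= 0) return ori_output;
  std::vector<double> residuals(num_data_in_leaf);
  for (data_size_t i = 0; i < num_data_in_leaf; ++i) {
    residuals[i] = static_cast<double>(labels[bag_mapper[index_mapper[i]]]) - pred;
  }
  auto mid = residuals.begin() + num_data_in_leaf / 2;
  std::nth_element(residuals.begin(), mid, residuals.end());
  return *mid;  // median residual becomes the renewed leaf value
}
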
3 changes: 3 additions & 0 deletions include/LightGBM/tree_learner.h
@@ -75,6 +75,9 @@ class TreeLearner {
virtual void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, const double* prediction,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const = 0;

virtual void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, double prediction,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const = 0;

TreeLearner() = default;
/*! \brief Disable copy */
TreeLearner& operator=(const TreeLearner&) = delete;
14 changes: 8 additions & 6 deletions src/boosting/gbdt.cpp
@@ -308,15 +308,17 @@ double ObtainAutomaticInitialScore(const ObjectiveFunction* fobj, int class_id)
return init_score;
}

double GBDT::BoostFromAverage(int class_id) {
double GBDT::BoostFromAverage(int class_id, bool update_scorer) {
// boosting from average label; or customized "average" if implemented for the current objective
if (models_.empty() && !train_score_updater_->has_init_score() && objective_function_ != nullptr) {
if (config_->boost_from_average || (train_data_ != nullptr && train_data_->num_features() == 0)) {
double init_score = ObtainAutomaticInitialScore(objective_function_, class_id);
if (std::fabs(init_score) > kEpsilon) {
train_score_updater_->AddScore(init_score, class_id);
for (auto& score_updater : valid_score_updater_) {
score_updater->AddScore(init_score, class_id);
if (update_scorer) {
train_score_updater_->AddScore(init_score, class_id);
for (auto& score_updater : valid_score_updater_) {
score_updater->AddScore(init_score, class_id);
}
}
Log::Info("Start training from score %lf", init_score);
return init_score;
@@ -335,7 +337,7 @@ bool GBDT::TrainOneIter(const score_t* gradients, const score_t* hessians) {
// boosting first
if (gradients == nullptr || hessians == nullptr) {
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
init_scores[cur_tree_id] = BoostFromAverage(cur_tree_id);
init_scores[cur_tree_id] = BoostFromAverage(cur_tree_id, true);
}
Boosting();
gradients = gradients_.data();
@@ -597,7 +599,7 @@ void GBDT::GetPredictAt(int data_idx, double* out_result, int64_t* out_len) {
num_data = valid_score_updater_[used_idx]->num_data();
*out_len = static_cast<int64_t>(num_data) * num_class_;
}
if (objective_function_ != nullptr && !average_output_) {
if (objective_function_ != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
std::vector<double> tree_pred(num_tree_per_iteration_);
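The extra `update_scorer` argument lets a caller obtain the automatic init score without shifting the train/valid score updaters: `GBDT::TrainOneIter` keeps passing `true`, while the rewritten RF passes `false` and folds the init score into each tree instead. A minimal standalone mock of that control flow follows; the types and names below are stand-ins, not LightGBM's own.

#include <cmath>
#include <cstdio>
#include <vector>

// Mock score updater: just holds per-row scores and can shift them all.
struct MockScoreUpdater {
  std::vector<double> scores;
  void AddScore(double v) { for (auto& s : scores) s += v; }
};

// Returns the init score; only pushes it into the score updaters when
// update_scorer is true (GBDT passes true; RF passes false and folds the
// init score into each tree's output via AddBias instead).
double BoostFromAverageSketch(double init_score, bool update_scorer,
                              MockScoreUpdater* train,
                              std::vector<MockScoreUpdater>* valids) {
  const double kEpsilon = 1e-15;  // stand-in for LightGBM's kEpsilon
  if (std::fabs(init_score) > kEpsilon && update_scorer) {
    train->AddScore(init_score);
    for (auto& v : *valids) v.AddScore(init_score);
  }
  return init_score;
}

int main() {
  MockScoreUpdater train{{0.0, 0.0}};
  std::vector<MockScoreUpdater> valids;
  // GBDT-style call: stored scores shift by the init score.
  double s1 = BoostFromAverageSketch(0.5, /*update_scorer=*/true, &train, &valids);
  // RF-style call: the value is returned but stored scores stay untouched.
  double s2 = BoostFromAverageSketch(0.5, /*update_scorer=*/false, &train, &valids);
  std::printf("%f %f %f\n", s1, s2, train.scores[0]);
  return 0;
}
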
2 changes: 1 addition & 1 deletion src/boosting/gbdt.h
@@ -407,7 +407,7 @@ class GBDT : public GBDTBase {
*/
std::string OutputMetric(int iter);

double BoostFromAverage(int class_id);
double BoostFromAverage(int class_id, bool update_scorer);

/*! \brief current iteration */
int iter_;
4 changes: 2 additions & 2 deletions src/boosting/gbdt_model_text.cpp
@@ -152,7 +152,7 @@ std::string GBDT::ModelToIfElse(int num_iteration) const {
str_buf << "\t\t\t" << "output[k] /= num_iteration_for_pred_;" << '\n';
str_buf << "\t\t" << "}" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "\t" << "else if (objective_function_ != nullptr) {" << '\n';
str_buf << "\t" << "if (objective_function_ != nullptr) {" << '\n';
str_buf << "\t\t" << "objective_function_->ConvertOutput(output, output);" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "}" << '\n';
@@ -166,7 +166,7 @@ std::string GBDT::ModelToIfElse(int num_iteration) const {
str_buf << "\t\t\t" << "output[k] /= num_iteration_for_pred_;" << '\n';
str_buf << "\t\t" << "}" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "\t" << "else if (objective_function_ != nullptr) {" << '\n';
str_buf << "\t" << "if (objective_function_ != nullptr) {" << '\n';
str_buf << "\t\t" << "objective_function_->ConvertOutput(output, output);" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "}" << '\n';
6 changes: 4 additions & 2 deletions src/boosting/gbdt_prediction.cpp
@@ -52,7 +52,8 @@ void GBDT::Predict(const double* features, double* output, const PredictionEarly
for (int k = 0; k < num_tree_per_iteration_; ++k) {
output[k] /= num_iteration_for_pred_;
}
} else if (objective_function_ != nullptr) {
}
if (objective_function_ != nullptr) {
objective_function_->ConvertOutput(output, output);
}
}
@@ -63,7 +64,8 @@ void GBDT::PredictByMap(const std::unordered_map<int, double>& features, double*
for (int k = 0; k < num_tree_per_iteration_; ++k) {
output[k] /= num_iteration_for_pred_;
}
} else if (objective_function_ != nullptr) {
}
if (objective_function_ != nullptr) {
objective_function_->ConvertOutput(output, output);
}
}
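Turning the `else if` into a separate `if` means an averaged (random-forest) model no longer skips the objective's output conversion: the raw per-class sums are divided by the number of iterations first, and the transform is applied afterwards. A standalone sketch of the corrected order, using a sigmoid as a stand-in for `ConvertOutput` (the real transform depends on the configured objective):

#include <cmath>
#include <cstdio>
#include <vector>

// Standalone sketch of the fixed post-processing order. The sigmoid stands
// in for objective_function_->ConvertOutput(); the real transform depends on
// the configured objective.
void PostProcessPrediction(std::vector<double>* output, bool average_output,
                           int num_iteration_for_pred) {
  if (average_output) {
    // Random forest: trees vote with equal weight, so average the raw sums.
    for (auto& o : *output) o /= num_iteration_for_pred;
  }
  // This used to be `else if`, so averaged models never reached the
  // objective's output conversion; now both steps can apply.
  for (auto& o : *output) o = 1.0 / (1.0 + std::exp(-o));
}

int main() {
  std::vector<double> out{3.0};          // raw sum over 3 iterations
  PostProcessPrediction(&out, true, 3);  // average -> 1.0, sigmoid -> ~0.73
  std::printf("%f\n", out[0]);
  return 0;
}
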
119 changes: 62 additions & 57 deletions src/boosting/rf.hpp
@@ -15,17 +15,17 @@ namespace LightGBM {
/*!
* \brief Random Forest implementation
*/
class RF: public GBDT {
class RF : public GBDT {
public:

RF() : GBDT() {
RF() : GBDT() {
average_output_ = true;
}

~RF() {}

void Init(const Config* config, const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override {
const std::vector<const Metric*>& training_metrics) override {
CHECK(config->bagging_freq > 0 && config->bagging_fraction < 1.0f && config->bagging_fraction > 0.0f);
CHECK(config->feature_fraction <= 1.0f && config->feature_fraction > 0.0f);
GBDT::Init(config, train_data, objective_function, training_metrics);
@@ -37,17 +37,15 @@ class RF: public GBDT {
} else {
CHECK(train_data->metadata().init_score() == nullptr);
}
// cannot use RF for multi-class.
CHECK(num_tree_per_iteration_ == num_class_);
// no shrinkage rate for RF
shrinkage_rate_ = 1.0f;
// only boosting one time
GetRFTargets(train_data);
Boosting();
if (is_use_subset_ && bag_data_cnt_ < num_data_) {
tmp_grad_.resize(num_data_);
tmp_hess_.resize(num_data_);
}
tmp_score_.resize(num_data_, 0.0);
}

void ResetConfig(const Config* config) override {
@@ -59,54 +57,41 @@ class RF: public GBDT {
}

void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override {
const std::vector<const Metric*>& training_metrics) override {
GBDT::ResetTrainingData(train_data, objective_function, training_metrics);
if (iter_ + num_init_iteration_ > 0) {
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
train_score_updater_->MultiplyScore(1.0f / (iter_ + num_init_iteration_), cur_tree_id);
}
}
// cannot use RF for multi-class.
CHECK(num_tree_per_iteration_ == num_class_);
// only boosting one time
GetRFTargets(train_data);
Boosting();
if (is_use_subset_ && bag_data_cnt_ < num_data_) {
tmp_grad_.resize(num_data_);
tmp_hess_.resize(num_data_);
}
tmp_score_.resize(num_data_, 0.0);
}

void GetRFTargets(const Dataset* train_data) {
auto label_ptr = train_data->metadata().label();
std::fill(hessians_.begin(), hessians_.end(), 1.0f);
if (num_tree_per_iteration_ == 1) {
OMP_INIT_EX();
#pragma omp parallel for schedule(static,1)
for (data_size_t i = 0; i < train_data->num_data(); ++i) {
OMP_LOOP_EX_BEGIN();
score_t label = label_ptr[i];
gradients_[i] = static_cast<score_t>(-label);
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
void Boosting() override {
if (objective_function_ == nullptr) {
Log::Fatal("No object function provided");
}
init_scores_.resize(num_tree_per_iteration_, 0.0);
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
init_scores_[cur_tree_id] = BoostFromAverage(cur_tree_id, false);
}
else {
std::fill(gradients_.begin(), gradients_.end(), 0.0f);
OMP_INIT_EX();
#pragma omp parallel for schedule(static,1)
for (data_size_t i = 0; i < train_data->num_data(); ++i) {
OMP_LOOP_EX_BEGIN();
score_t label = label_ptr[i];
gradients_[i + static_cast<int>(label) * num_data_] = -1.0f;
OMP_LOOP_EX_END();
size_t total_size = static_cast<size_t>(num_data_) * num_tree_per_iteration_;
std::vector<double> tmp_scores(total_size, 0.0f);
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_tree_per_iteration_; ++j) {
size_t bias = static_cast<size_t>(j)* num_data_;
for (data_size_t i = 0; i < num_data_; ++i) {
tmp_scores[bias + i] = init_scores_[j];
}
OMP_THROW_EX();
}
}

void Boosting() override {

objective_function_->
GetGradients(tmp_scores.data(), gradients_.data(), hessians_.data());
}

bool TrainOneIter(const score_t* gradients, const score_t* hessians) override {
@@ -120,27 +105,51 @@ class RF: public GBDT {
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
std::unique_ptr<Tree> new_tree(new Tree(2));
size_t bias = static_cast<size_t>(cur_tree_id)* num_data_;
auto grad = gradients + bias;
auto hess = hessians + bias;

// need to copy gradients for bagging subset.
if (is_use_subset_ && bag_data_cnt_ < num_data_) {
for (int i = 0; i < bag_data_cnt_; ++i) {
tmp_grad_[i] = grad[bag_data_indices_[i]];
tmp_hess_[i] = hess[bag_data_indices_[i]];
if (class_need_train_[cur_tree_id]) {

auto grad = gradients + bias;
auto hess = hessians + bias;

// need to copy gradients for bagging subset.
if (is_use_subset_ && bag_data_cnt_ < num_data_) {
for (int i = 0; i < bag_data_cnt_; ++i) {
tmp_grad_[i] = grad[bag_data_indices_[i]];
tmp_hess_[i] = hess[bag_data_indices_[i]];
}
grad = tmp_grad_.data();
hess = tmp_hess_.data();
}
grad = tmp_grad_.data();
hess = tmp_hess_.data();

new_tree.reset(tree_learner_->Train(grad, hess, is_constant_hessian_,
forced_splits_json_));
}
new_tree.reset(tree_learner_->Train(grad, hess, is_constant_hessian_,
forced_splits_json_));

if (new_tree->num_leaves() > 1) {
tree_learner_->RenewTreeOutput(new_tree.get(), objective_function_, tmp_score_.data(),
tree_learner_->RenewTreeOutput(new_tree.get(), objective_function_, init_scores_[cur_tree_id],
num_data_, bag_data_indices_.data(), bag_data_cnt_);
if (std::fabs(init_scores_[cur_tree_id]) > kEpsilon) {
new_tree->AddBias(init_scores_[cur_tree_id]);
}
// update score
MultiplyScore(cur_tree_id, (iter_ + num_init_iteration_));
UpdateScore(new_tree.get(), cur_tree_id);
MultiplyScore(cur_tree_id, 1.0 / (iter_ + num_init_iteration_ + 1));
} else {
// only add default score one-time
if (models_.size() < static_cast<size_t>(num_tree_per_iteration_)) {
double output = 0.0;
if (!class_need_train_[cur_tree_id]) {
if (objective_function_ != nullptr) {
output = objective_function_->BoostFromScore(cur_tree_id);
} else {
output = init_scores_[cur_tree_id];
}
}
new_tree->AsConstantTree(output);
MultiplyScore(cur_tree_id, (iter_ + num_init_iteration_));
UpdateScore(new_tree.get(), cur_tree_id);
MultiplyScore(cur_tree_id, 1.0 / (iter_ + num_init_iteration_ + 1));
}
}
// add model
models_.push_back(std::move(new_tree));
@@ -178,7 +187,7 @@ class RF: public GBDT {
}

void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override {
const std::vector<const Metric*>& valid_metrics) override {
GBDT::AddValidDataset(valid_data, valid_metrics);
if (iter_ + num_init_iteration_ > 0) {
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
@@ -192,17 +201,13 @@ class RF: public GBDT {
return true;
};

std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const override {
return metric->Eval(score, nullptr);
}

private:

std::vector<score_t> tmp_grad_;
std::vector<score_t> tmp_hess_;
std::vector<double> tmp_score_;
std::vector<double> init_scores_;

};

} // namespace LightGBM
#endif // LIGHTGBM_BOOSTING_RF_H_
#endif // LIGHTGBM_BOOSTING_RF_H_
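The `MultiplyScore(cur_tree_id, n)` / `UpdateScore` / `MultiplyScore(cur_tree_id, 1.0 / (n + 1))` sequence in `TrainOneIter` (with n = iter_ + num_init_iteration_) keeps the score updater holding a running average of tree outputs: scale the stored mean back to a raw sum, add the new tree, and re-normalize. The arithmetic, as a tiny standalone sketch with made-up numbers:

#include <cstdio>

// Numeric sketch of RF's incremental averaging. `avg` is the mean output of
// the trees seen so far (what the score updater stores); `tree_output` is the
// new tree's prediction for one row.
double UpdateRunningAverage(double avg, int n_trees_so_far, double tree_output) {
  double sum = avg * n_trees_so_far;   // MultiplyScore(id, n): back to a raw sum
  sum += tree_output;                  // UpdateScore(tree, id): add the new tree
  return sum / (n_trees_so_far + 1);   // MultiplyScore(id, 1.0 / (n + 1))
}

int main() {
  // Two trees so far averaging 0.4; a third tree predicting 1.0 gives 0.6.
  std::printf("%f\n", UpdateRunningAverage(0.4, 2, 1.0));
  return 0;
}
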
