renamed variables from bias to offset (#2539)
SiNZeRo authored and guolinke committed Nov 5, 2019
1 parent bd7e184 commit 516bd37
Showing 10 changed files with 67 additions and 67 deletions.
22 changes: 11 additions & 11 deletions src/boosting/gbdt.cpp
@@ -310,9 +310,9 @@ void GBDT::RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction)
leaf_pred[i] = tree_leaf_prediction[i][model_index];
CHECK(leaf_pred[i] < models_[model_index]->num_leaves());
}
- size_t bias = static_cast<size_t>(tree_id) * num_data_;
- auto grad = gradients_.data() + bias;
- auto hess = hessians_.data() + bias;
+ size_t offset = static_cast<size_t>(tree_id) * num_data_;
+ auto grad = gradients_.data() + offset;
+ auto hess = hessians_.data() + offset;
auto new_tree = tree_learner_->FitByExistingTree(models_[model_index].get(), leaf_pred, grad, hess);
train_score_updater_->AddScore(tree_learner_.get(), new_tree, tree_id);
models_[model_index].reset(new_tree);
@@ -381,26 +381,26 @@ bool GBDT::TrainOneIter(const score_t* gradients, const score_t* hessians) {

bool should_continue = false;
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
- const size_t bias = static_cast<size_t>(cur_tree_id) * num_data_;
+ const size_t offset = static_cast<size_t>(cur_tree_id) * num_data_;
std::unique_ptr<Tree> new_tree(new Tree(2));
if (class_need_train_[cur_tree_id] && train_data_->num_features() > 0) {
- auto grad = gradients + bias;
- auto hess = hessians + bias;
+ auto grad = gradients + offset;
+ auto hess = hessians + offset;
// need to copy gradients for bagging subset.
if (is_use_subset_ && bag_data_cnt_ < num_data_) {
for (int i = 0; i < bag_data_cnt_; ++i) {
- gradients_[bias + i] = grad[bag_data_indices_[i]];
- hessians_[bias + i] = hess[bag_data_indices_[i]];
+ gradients_[offset + i] = grad[bag_data_indices_[i]];
+ hessians_[offset + i] = hess[bag_data_indices_[i]];
}
- grad = gradients_.data() + bias;
- hess = hessians_.data() + bias;
+ grad = gradients_.data() + offset;
+ hess = hessians_.data() + offset;
}
new_tree.reset(tree_learner_->Train(grad, hess, is_constant_hessian_, forced_splits_json_));
}

if (new_tree->num_leaves() > 1) {
should_continue = true;
- auto score_ptr = train_score_updater_->score() + bias;
+ auto score_ptr = train_score_updater_->score() + offset;
auto residual_getter = [score_ptr](const label_t* label, int i) {return static_cast<double>(label[i]) - score_ptr[i]; };
tree_learner_->RenewTreeOutput(new_tree.get(), objective_function_, residual_getter,
num_data_, bag_data_indices_.data(), bag_data_cnt_);
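For context, the renamed offset is a row offset into LightGBM's flat gradient and hessian buffers: one boosting iteration trains num_tree_per_iteration_ trees, and each tree's num_data_ gradients sit back to back in a single buffer. A minimal standalone sketch of that layout, with illustrative sizes; only the offset arithmetic mirrors the code above:

```cpp
#include <cstddef>
#include <vector>

// Sketch of the flat buffer layout: gradients for k trees over n rows live
// in one buffer of size k * n, and tree t's slice starts at t * n.
int main() {
  const int num_tree_per_iteration = 3;  // illustrative sizes, not LightGBM defaults
  const int num_data = 5;
  std::vector<float> gradients(
      static_cast<std::size_t>(num_tree_per_iteration) * num_data, 0.0f);
  for (int tree_id = 0; tree_id < num_tree_per_iteration; ++tree_id) {
    const std::size_t offset = static_cast<std::size_t>(tree_id) * num_data;
    float* grad = gradients.data() + offset;  // per-tree view, as in TrainOneIter
    for (int i = 0; i < num_data; ++i) {
      grad[i] = static_cast<float>(tree_id);  // stand-in for real gradient values
    }
  }
  return 0;
}
```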
10 changes: 5 additions & 5 deletions src/boosting/rf.hpp
@@ -91,9 +91,9 @@ class RF : public GBDT {
std::vector<double> tmp_scores(total_size, 0.0f);
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_tree_per_iteration_; ++j) {
- size_t bias = static_cast<size_t>(j)* num_data_;
+ size_t offset = static_cast<size_t>(j)* num_data_;
for (data_size_t i = 0; i < num_data_; ++i) {
- tmp_scores[bias + i] = init_scores_[j];
+ tmp_scores[offset + i] = init_scores_[j];
}
}
objective_function_->
@@ -110,10 +110,10 @@ class RF : public GBDT {
hessians = hessians_.data();
for (int cur_tree_id = 0; cur_tree_id < num_tree_per_iteration_; ++cur_tree_id) {
std::unique_ptr<Tree> new_tree(new Tree(2));
- size_t bias = static_cast<size_t>(cur_tree_id)* num_data_;
+ size_t offset = static_cast<size_t>(cur_tree_id)* num_data_;
if (class_need_train_[cur_tree_id]) {
- auto grad = gradients + bias;
- auto hess = hessians + bias;
+ auto grad = gradients + offset;
+ auto hess = hessians + offset;

// need to copy gradients for bagging subset.
if (is_use_subset_ && bag_data_cnt_ < num_data_) {
16 changes: 8 additions & 8 deletions src/c_api.cpp
@@ -1764,8 +1764,8 @@ IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* ind
const int32_t* ptr_col_ptr = reinterpret_cast<const int32_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
- return [=] (int bias) {
-   int64_t i = static_cast<int64_t>(start + bias);
+ return [=] (int offset) {
+   int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
@@ -1777,8 +1777,8 @@ IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* ind
const int64_t* ptr_col_ptr = reinterpret_cast<const int64_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
- return [=] (int bias) {
-   int64_t i = static_cast<int64_t>(start + bias);
+ return [=] (int offset) {
+   int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
@@ -1793,8 +1793,8 @@ IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* ind
const int32_t* ptr_col_ptr = reinterpret_cast<const int32_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
- return [=] (int bias) {
-   int64_t i = static_cast<int64_t>(start + bias);
+ return [=] (int offset) {
+   int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
@@ -1806,8 +1806,8 @@ IterateFunctionFromCSC(const void* col_ptr, int col_ptr_type, const int32_t* ind
const int64_t* ptr_col_ptr = reinterpret_cast<const int64_t*>(col_ptr);
int64_t start = ptr_col_ptr[col_idx];
int64_t end = ptr_col_ptr[col_idx + 1];
- return [=] (int bias) {
-   int64_t i = static_cast<int64_t>(start + bias);
+ return [=] (int offset) {
+   int64_t i = static_cast<int64_t>(start + offset);
if (i >= end) {
return std::make_pair(-1, 0.0);
}
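The four lambdas above differ only in the integer widths of the CSC arrays; each closes over its column's [start, end) range and takes the renamed offset as a position within that column. A hedged sketch of the same closure shape, using made-up data rather than LightGBM's API:

```cpp
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Illustrative CSC storage: col_ptr delimits each column's slice of indices/data.
  std::vector<int64_t> col_ptr = {0, 2, 5};
  std::vector<int32_t> indices = {1, 4, 0, 2, 3};
  std::vector<double>  data    = {0.5, 1.5, 2.0, 2.5, 3.0};

  int col_idx = 1;
  int64_t start = col_ptr[col_idx], end = col_ptr[col_idx + 1];
  // Same shape as IterateFunctionFromCSC's result: yields (row, value),
  // or (-1, 0.0) once offset walks past the end of the column.
  auto iter = [=](int offset) {
    int64_t i = start + offset;
    if (i >= end) return std::make_pair(-1, 0.0);
    return std::make_pair(static_cast<int>(indices[i]), data[i]);
  };
  for (int k = 0; ; ++k) {
    auto p = iter(k);
    if (p.first < 0) break;
    std::cout << "row " << p.first << " value " << p.second << "\n";
  }
  return 0;
}
```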
8 changes: 4 additions & 4 deletions src/io/dense_bin.hpp
@@ -24,9 +24,9 @@ class DenseBinIterator: public BinIterator {
max_bin_(static_cast<VAL_T>(max_bin)),
default_bin_(static_cast<VAL_T>(default_bin)) {
if (default_bin_ == 0) {
- bias_ = 1;
+ offset_ = 1;
} else {
- bias_ = 0;
+ offset_ = 0;
}
}
inline uint32_t RawGet(data_size_t idx) override;
@@ -38,7 +38,7 @@ class DenseBinIterator: public BinIterator {
VAL_T min_bin_;
VAL_T max_bin_;
VAL_T default_bin_;
- uint8_t bias_;
+ uint8_t offset_;
};
/*!
* \brief Used to store bins for dense feature
@@ -334,7 +334,7 @@ template <typename VAL_T>
uint32_t DenseBinIterator<VAL_T>::Get(data_size_t idx) {
auto ret = bin_data_->data_[idx];
if (ret >= min_bin_ && ret <= max_bin_) {
- return ret - min_bin_ + bias_;
+ return ret - min_bin_ + offset_;
} else {
return default_bin_;
}
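As background on the member being renamed: when a feature's default_bin_ is 0, the iterator shifts in-range bins up by one (offset_ = 1) so that the value 0 can stand for the default bin. A small self-contained sketch of the mapping in Get, with illustrative values:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of DenseBinIterator::Get's mapping: raw bins in [min_bin, max_bin]
// are rebased to start at offset; anything else falls back to default_bin.
uint32_t Get(uint8_t raw, uint8_t min_bin, uint8_t max_bin,
             uint8_t default_bin, uint8_t offset) {
  if (raw >= min_bin && raw <= max_bin) return raw - min_bin + offset;
  return default_bin;
}

int main() {
  uint8_t default_bin = 0;
  uint8_t offset = (default_bin == 0) ? 1 : 0;  // same rule as the constructor above
  assert(Get(3, 2, 5, default_bin, offset) == 2);  // in range: rebased past slot 0
  assert(Get(7, 2, 5, default_bin, offset) == 0);  // out of range: default bin
  return 0;
}
```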
8 changes: 4 additions & 4 deletions src/io/dense_nbits_bin.hpp
@@ -22,9 +22,9 @@ class Dense4bitsBinIterator : public BinIterator {
max_bin_(static_cast<uint8_t>(max_bin)),
default_bin_(static_cast<uint8_t>(default_bin)) {
if (default_bin_ == 0) {
- bias_ = 1;
+ offset_ = 1;
} else {
- bias_ = 0;
+ offset_ = 0;
}
}
inline uint32_t RawGet(data_size_t idx) override;
@@ -36,7 +36,7 @@ class Dense4bitsBinIterator : public BinIterator {
uint8_t min_bin_;
uint8_t max_bin_;
uint8_t default_bin_;
- uint8_t bias_;
+ uint8_t offset_;
};

class Dense4bitsBin : public Bin {
@@ -383,7 +383,7 @@ class Dense4bitsBin : public Bin {
uint32_t Dense4bitsBinIterator::Get(data_size_t idx) {
const auto bin = (bin_data_->data_[idx >> 1] >> ((idx & 1) << 2)) & 0xf;
if (bin >= min_bin_ && bin <= max_bin_) {
- return bin - min_bin_ + bias_;
+ return bin - min_bin_ + offset_;
} else {
return default_bin_;
}
12 changes: 6 additions & 6 deletions src/io/parser.hpp
@@ -24,15 +24,15 @@ class CSVParser: public Parser {
std::vector<std::pair<int, double>>* out_features, double* out_label) const override {
int idx = 0;
double val = 0.0f;
- int bias = 0;
+ int offset = 0;
*out_label = 0.0f;
while (*str != '\0') {
str = Common::Atof(str, &val);
if (idx == label_idx_) {
*out_label = val;
- bias = -1;
+ offset = -1;
} else if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
- out_features->emplace_back(idx + bias, val);
+ out_features->emplace_back(idx + offset, val);
}
++idx;
if (*str == ',') {
@@ -61,14 +61,14 @@ class TSVParser: public Parser {
std::vector<std::pair<int, double>>* out_features, double* out_label) const override {
int idx = 0;
double val = 0.0f;
- int bias = 0;
+ int offset = 0;
while (*str != '\0') {
str = Common::Atof(str, &val);
if (idx == label_idx_) {
*out_label = val;
- bias = -1;
+ offset = -1;
} else if (std::fabs(val) > kZeroThreshold || std::isnan(val)) {
- out_features->emplace_back(idx + bias, val);
+ out_features->emplace_back(idx + offset, val);
}
++idx;
if (*str == '\t') {
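In both parsers, offset (formerly bias) compensates for the label column: once the label has been consumed, each later column's feature index shifts down by one so feature indices stay contiguous. A simplified standalone sketch of that bookkeeping (std::strtod and std::getline stand in for LightGBM's Common::Atof; an exact-zero test stands in for its kZeroThreshold check):

```cpp
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

int main() {
  // Column 1 is the label; surviving feature columns map to indices 0 and 2.
  std::string line = "1.0,3.5,0.0,2.25";
  int label_idx = 1, idx = 0, offset = 0;
  double label = 0.0;
  std::vector<std::pair<int, double>> features;
  std::stringstream ss(line);
  std::string tok;
  while (std::getline(ss, tok, ',')) {
    double val = std::strtod(tok.c_str(), nullptr);
    if (idx == label_idx) {
      label = val;
      offset = -1;            // every later column shifts down by one
    } else if (val != 0.0) {  // sparse zeros are dropped, as in the parser
      features.emplace_back(idx + offset, val);
    }
    ++idx;
  }
  std::cout << "label=" << label << "\n";
  for (const auto& f : features)
    std::cout << "feature " << f.first << " = " << f.second << "\n";
  return 0;
}
```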
8 changes: 4 additions & 4 deletions src/io/sparse_bin.hpp
@@ -31,9 +31,9 @@ class SparseBinIterator: public BinIterator {
max_bin_(static_cast<VAL_T>(max_bin)),
default_bin_(static_cast<VAL_T>(default_bin)) {
if (default_bin_ == 0) {
- bias_ = 1;
+ offset_ = 1;
} else {
- bias_ = 0;
+ offset_ = 0;
}
Reset(0);
}
@@ -48,7 +48,7 @@
inline uint32_t Get(data_size_t idx) override {
VAL_T ret = InnerRawGet(idx);
if (ret >= min_bin_ && ret <= max_bin_) {
- return ret - min_bin_ + bias_;
+ return ret - min_bin_ + offset_;
} else {
return default_bin_;
}
@@ -63,7 +63,7 @@
VAL_T min_bin_;
VAL_T max_bin_;
VAL_T default_bin_;
- uint8_t bias_;
+ uint8_t offset_;
};

template <typename VAL_T>
4 changes: 2 additions & 2 deletions src/objective/multiclass_objective.hpp
@@ -221,8 +221,8 @@ class MulticlassOVA: public ObjectiveFunction {

void GetGradients(const double* score, score_t* gradients, score_t* hessians) const override {
for (int i = 0; i < num_class_; ++i) {
- int64_t bias = static_cast<int64_t>(num_data_) * i;
- binary_loss_[i]->GetGradients(score + bias, gradients + bias, hessians + bias);
+ int64_t offset = static_cast<int64_t>(num_data_) * i;
+ binary_loss_[i]->GetGradients(score + offset, gradients + offset, hessians + offset);
}
}

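Here the per-class offset carves the flat score, gradient, and hessian buffers into num_class_ contiguous slices of num_data_ entries, one per one-vs-all binary subproblem. A minimal sketch of that slicing; BinaryGetGradients is a stand-in for LightGBM's binary loss, not its real API:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for one binary loss: fills one class's slice of gradients/hessians.
static void BinaryGetGradients(const double* score, float* grad, float* hess, int n) {
  for (int i = 0; i < n; ++i) {
    grad[i] = static_cast<float>(score[i]);  // placeholder gradient formula
    hess[i] = 1.0f;
  }
}

int main() {
  const int num_class = 4, num_data = 100;  // illustrative sizes
  std::vector<double> score(static_cast<std::size_t>(num_class) * num_data, 0.25);
  std::vector<float> gradients(score.size()), hessians(score.size());
  // Same slicing as MulticlassOVA::GetGradients after the rename.
  for (int i = 0; i < num_class; ++i) {
    int64_t offset = static_cast<int64_t>(num_data) * i;
    BinaryGetGradients(score.data() + offset, gradients.data() + offset,
                       hessians.data() + offset, num_data);
  }
  return 0;
}
```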
