remove many vector.at()
guolinke committed Nov 1, 2019
1 parent 8f7199a commit a36eb7e
Showing 5 changed files with 52 additions and 41 deletions.
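The change is mechanical throughout: bounds-checked std::vector::at() calls on pointer arguments are replaced by a locally cached reference and unchecked operator[]. A minimal standalone sketch of the before/after pattern (illustrative function names, not from the repository):

#include <cstddef>
#include <vector>

// Before: each access goes through the pointer and the bounds-checked at(),
// which can throw std::out_of_range.
void FillBefore(std::vector<int>* vec) {
  for (std::size_t i = 0; i < vec->size(); ++i) {
    vec->at(i) = static_cast<int>(i);
  }
}

// After: dereference the pointer once, then index with unchecked operator[].
void FillAfter(std::vector<int>* vec) {
  auto& ref_vec = *vec;
  for (std::size_t i = 0; i < ref_vec.size(); ++i) {
    ref_vec[i] = static_cast<int>(i);
  }
}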
28 changes: 16 additions & 12 deletions include/LightGBM/utils/common.h
@@ -632,17 +632,19 @@ inline static void Softmax(const double* input, double* output, int len) {
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
std::vector<const T*> ret;
for (size_t i = 0; i < input.size(); ++i) {
ret.push_back(input.at(i).get());
for (auto t = input.begin(); t !=input.end(); ++t) {
ret.push_back(t->get());
}
return ret;
}

template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
std::vector<std::pair<T1, T2>> arr;
auto& ref_key = *keys;
auto& ref_value = *values;
for (size_t i = start; i < keys->size(); ++i) {
arr.emplace_back(keys->at(i), values->at(i));
arr.emplace_back(ref_key[i], ref_value[i]);
}
if (!is_reverse) {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
@@ -654,16 +656,17 @@ inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, s
});
}
for (size_t i = start; i < arr.size(); ++i) {
keys->at(i) = arr[i].first;
values->at(i) = arr[i].second;
ref_key[i] = arr[i].first;
ref_value[i] = arr[i].second;
}
}
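As a usage note for SortForPair above: it stable-sorts the keys from index start onward (ascending when is_reverse is false; the comparator body is collapsed in this hunk) and permutes values so the pairs stay aligned. A hedged sketch, assuming the template shown here is in scope:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> keys = {9, 3, 7, 5};
  std::vector<char> values = {'a', 'b', 'c', 'd'};
  SortForPair(&keys, &values, /*start=*/1);  // index 0 is left untouched
  // expected: keys   -> {9, 3, 5, 7}
  //           values -> {'a', 'b', 'd', 'c'}
  for (std::size_t i = 0; i < keys.size(); ++i) {
    std::printf("%d:%c\n", keys[i], values[i]);
  }
  return 0;
}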

template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
std::vector<T*> ptr(data->size());
auto& ref_data = *data;
for (size_t i = 0; i < data->size(); ++i) {
ptr[i] = data->at(i).data();
ptr[i] = ref_data[i].data();
}
return ptr;
}
@@ -841,12 +844,13 @@ inline static std::vector<uint32_t> EmptyBitset(int n) {

template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
int i1 = val / 32;
int i2 = val % 32;
if (static_cast<int>(vec->size()) < i1 + 1) {
vec->resize(i1 + 1, 0);
}
vec->at(i1) |= (1 << i2);
auto& ref_v = *vec;
int i1 = val / 32;
int i2 = val % 32;
if (static_cast<int>(vec->size()) < i1 + 1) {
vec->resize(i1 + 1, 0);
}
ref_v[i1] |= (1 << i2);
}

template<typename T>
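The bitset helpers above pack each value into word val / 32, bit val % 32 of a std::vector<uint32_t>. A standalone sketch of that packing, with a hypothetical InBitset membership check added for illustration (only InsertBitset appears in this diff):

#include <cstdint>
#include <vector>

void InsertBitsetSketch(std::vector<uint32_t>* vec, int val) {
  auto& ref_v = *vec;           // dereference once, as in the diff above
  int i1 = val / 32;            // word index
  int i2 = val % 32;            // bit index within the word
  if (static_cast<int>(ref_v.size()) < i1 + 1) {
    ref_v.resize(i1 + 1, 0);
  }
  ref_v[i1] |= (1u << i2);
}

// Hypothetical inverse, not part of the commit.
bool InBitset(const std::vector<uint32_t>& vec, int val) {
  int i1 = val / 32;
  int i2 = val % 32;
  if (i1 >= static_cast<int>(vec.size())) return false;
  return ((vec[i1] >> i2) & 1u) != 0;
}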
45 changes: 24 additions & 21 deletions src/io/dataset.cpp
@@ -61,8 +61,9 @@ int GetConfilctCount(const std::vector<bool>& mark, const int* indices, int num_
return ret;
}
void MarkUsed(std::vector<bool>* mark, const int* indices, int num_indices) {
auto& ref_mark = *mark;
for (int i = 0; i < num_indices; ++i) {
mark->at(indices[i]) = true;
ref_mark[indices[i]] = true;
}
}

@@ -238,8 +239,9 @@ void Dataset::Construct(
sparse_threshold_ = io_config.sparse_threshold;
// get num_features
std::vector<int> used_features;
auto& ref_bin_mappers = *bin_mappers;
for (int i = 0; i < static_cast<int>(bin_mappers->size()); ++i) {
if (bin_mappers->at(i) != nullptr && !bin_mappers->at(i)->is_trivial()) {
if (ref_bin_mappers[i] != nullptr && !ref_bin_mappers[i]->is_trivial()) {
used_features.emplace_back(i);
}
}
@@ -277,7 +279,7 @@ void Dataset::Construct(
real_feature_idx_[cur_fidx] = real_fidx;
feature2group_[cur_fidx] = i;
feature2subfeature_[cur_fidx] = j;
cur_bin_mappers.emplace_back(bin_mappers->at(real_fidx).release());
cur_bin_mappers.emplace_back(ref_bin_mappers[real_fidx].release());
++cur_fidx;
}
feature_groups_.emplace_back(std::unique_ptr<FeatureGroup>(
@@ -848,6 +850,7 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
int num_used_group = static_cast<int>(used_group.size());
auto ptr_ordered_grad = gradients;
auto ptr_ordered_hess = hessians;
auto& ref_ordered_bins = *ordered_bins;
if (data_indices != nullptr && num_data < num_data_) {
if (!is_constant_hessian) {
#pragma omp parallel for schedule(static)
@@ -874,7 +877,7 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const int num_bin = feature_groups_[group]->num_total_bin_;
std::memset(reinterpret_cast<void*>(data_ptr + 1), 0, (num_bin - 1) * sizeof(HistogramBinEntry));
// construct histograms for smaller leaf
if (ordered_bins->at(group) == nullptr) {
if (ref_ordered_bins[group] == nullptr) {
// if not use ordered bin
feature_groups_[group]->bin_data_->ConstructHistogram(
data_indices,
Expand All @@ -884,10 +887,10 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
data_ptr);
} else {
// used ordered bin
ordered_bins->at(group)->ConstructHistogram(leaf_idx,
gradients,
hessians,
data_ptr);
ref_ordered_bins[group]->ConstructHistogram(leaf_idx,
gradients,
hessians,
data_ptr);
}
OMP_LOOP_EX_END();
}
@@ -903,7 +906,7 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const int num_bin = feature_groups_[group]->num_total_bin_;
std::memset(reinterpret_cast<void*>(data_ptr + 1), 0, (num_bin - 1) * sizeof(HistogramBinEntry));
// construct histograms for smaller leaf
if (ordered_bins->at(group) == nullptr) {
if (ref_ordered_bins[group] == nullptr) {
// if not use ordered bin
feature_groups_[group]->bin_data_->ConstructHistogram(
data_indices,
Expand All @@ -912,9 +915,9 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
data_ptr);
} else {
// used ordered bin
ordered_bins->at(group)->ConstructHistogram(leaf_idx,
gradients,
data_ptr);
ref_ordered_bins[group]->ConstructHistogram(leaf_idx,
gradients,
data_ptr);
}
// fixed hessian.
for (int i = 0; i < num_bin; ++i) {
@@ -936,7 +939,7 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const int num_bin = feature_groups_[group]->num_total_bin_;
std::memset(reinterpret_cast<void*>(data_ptr + 1), 0, (num_bin - 1) * sizeof(HistogramBinEntry));
// construct histograms for smaller leaf
if (ordered_bins->at(group) == nullptr) {
if (ref_ordered_bins[group] == nullptr) {
// if not use ordered bin
feature_groups_[group]->bin_data_->ConstructHistogram(
num_data,
Expand All @@ -945,10 +948,10 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
data_ptr);
} else {
// used ordered bin
ordered_bins->at(group)->ConstructHistogram(leaf_idx,
gradients,
hessians,
data_ptr);
ref_ordered_bins[group]->ConstructHistogram(leaf_idx,
gradients,
hessians,
data_ptr);
}
OMP_LOOP_EX_END();
}
@@ -964,17 +967,17 @@ void Dataset::ConstructHistograms(const std::vector<int8_t>& is_feature_used,
const int num_bin = feature_groups_[group]->num_total_bin_;
std::memset(reinterpret_cast<void*>(data_ptr + 1), 0, (num_bin - 1) * sizeof(HistogramBinEntry));
// construct histograms for smaller leaf
if (ordered_bins->at(group) == nullptr) {
if (ref_ordered_bins[group] == nullptr) {
// if not use ordered bin
feature_groups_[group]->bin_data_->ConstructHistogram(
num_data,
ptr_ordered_grad,
data_ptr);
} else {
// used ordered bin
ordered_bins->at(group)->ConstructHistogram(leaf_idx,
gradients,
data_ptr);
ref_ordered_bins[group]->ConstructHistogram(leaf_idx,
gradients,
data_ptr);
}
// fixed hessian.
for (int i = 0; i < num_bin; ++i) {
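In ConstructHistograms the dereference auto& ref_ordered_bins = *ordered_bins; is hoisted above the OpenMP loops, so each iteration indexes through a plain reference instead of a bounds-checked at() call. A generic sketch of that pattern (ScaleAll is a made-up example, not the LightGBM API):

#include <vector>

void ScaleAll(std::vector<double>* values, double factor) {
  auto& ref_values = *values;   // single dereference before the parallel loop
  const int n = static_cast<int>(ref_values.size());
#pragma omp parallel for schedule(static)
  for (int i = 0; i < n; ++i) {
    ref_values[i] *= factor;    // unchecked operator[], no exception path inside the loop
  }
}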
7 changes: 4 additions & 3 deletions src/io/dataset_loader.cpp
@@ -1048,6 +1048,7 @@ void DatasetLoader::ConstructBinMappersFromTextData(int rank, int num_machines,
void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>* text_data, const Parser* parser, Dataset* dataset) {
std::vector<std::pair<int, double>> oneline_features;
double tmp_label = 0.0f;
auto& ref_text_data = *text_data;
if (predict_fun_ == nullptr) {
OMP_INIT_EX();
// if doesn't need to prediction with initial model
@@ -1057,11 +1058,11 @@ void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>* text_dat
const int tid = omp_get_thread_num();
oneline_features.clear();
// parser
parser->ParseOneLine(text_data->at(i).c_str(), &oneline_features, &tmp_label);
parser->ParseOneLine(ref_text_data[i].c_str(), &oneline_features, &tmp_label);
// set label
dataset->metadata_.SetLabelAt(i, static_cast<label_t>(tmp_label));
// free processed line:
text_data->at(i).clear();
ref_text_data[i].clear();
// shrink_to_fit will be very slow in linux, and seems not free memory, disable for now
// text_reader_->Lines()[i].shrink_to_fit();
// push data
@@ -1094,7 +1095,7 @@ void DatasetLoader::ExtractFeaturesFromMemory(std::vector<std::string>* text_dat
const int tid = omp_get_thread_num();
oneline_features.clear();
// parser
parser->ParseOneLine(text_data->at(i).c_str(), &oneline_features, &tmp_label);
parser->ParseOneLine(ref_text_data[i].c_str(), &oneline_features, &tmp_label);
// set initial score
std::vector<double> oneline_init_score(num_class_);
predict_fun_(oneline_features, oneline_init_score.data());
8 changes: 5 additions & 3 deletions src/metric/multiclass_metric.hpp
@@ -140,9 +140,10 @@ class MultiErrorMetric: public MulticlassMetric<MultiErrorMetric> {

inline static double LossOnPoint(label_t label, std::vector<double>* score, const Config& config) {
size_t k = static_cast<size_t>(label);
auto& ref_score = *score;
int num_larger = 0;
for (size_t i = 0; i < score->size(); ++i) {
if (score->at(i) >= score->at(k)) ++num_larger;
if (ref_score[i] >= ref_score[k]) ++num_larger;
if (num_larger > config.multi_error_top_k) return 1.0f;
}
return 0.0f;
@@ -164,8 +165,9 @@ class MultiSoftmaxLoglossMetric: public MulticlassMetric<MultiSoftmaxLoglossMetr

inline static double LossOnPoint(label_t label, std::vector<double>* score, const Config&) {
size_t k = static_cast<size_t>(label);
if (score->at(k) > kEpsilon) {
return static_cast<double>(-std::log(score->at(k)));
auto& ref_score = *score;
if (ref_score[k] > kEpsilon) {
return static_cast<double>(-std::log(ref_score[k]));
} else {
return -std::log(kEpsilon);
}
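For the multi-error metric above, LossOnPoint counts how many scores are at least as large as the true class's score and reports an error once that count exceeds multi_error_top_k. A standalone sketch with a worked example (plain top_k parameter in place of the Config object):

#include <cstddef>
#include <vector>

double MultiErrorLoss(std::size_t k, const std::vector<double>& score, int top_k) {
  int num_larger = 0;
  for (std::size_t i = 0; i < score.size(); ++i) {
    if (score[i] >= score[k]) ++num_larger;   // the true class counts itself
    if (num_larger > top_k) return 1.0;       // true class not within the top-k scores
  }
  return 0.0;
}

// Example: k = 2, score = {0.1, 0.5, 0.3, 0.1}, top_k = 1:
// 0.5 and 0.3 are >= score[2] = 0.3, so num_larger reaches 2 > 1 and the loss is 1.0.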
5 changes: 3 additions & 2 deletions src/treelearner/cost_effective_gradient_boosting.hpp
@@ -63,14 +63,15 @@ class CostEfficientGradientBoosting {
auto config = tree_learner_->config_;
auto train_data = tree_learner_->train_data_;
const int inner_feature_index = train_data->InnerFeatureIndex(best_split_info->feature);
auto& ref_best_split_per_leaf = *best_split_per_leaf;
if (!config->cegb_penalty_feature_coupled.empty() && !is_feature_used_in_split_[inner_feature_index]) {
is_feature_used_in_split_[inner_feature_index] = true;
for (int i = 0; i < tree->num_leaves(); ++i) {
if (i == best_leaf) continue;
auto split = &splits_per_leaf_[static_cast<size_t>(i) * train_data->num_features() + inner_feature_index];
split->gain += config->cegb_tradeoff * config->cegb_penalty_feature_coupled[best_split_info->feature];
if (*split > best_split_per_leaf->at(i))
best_split_per_leaf->at(i) = *split;
if (*split > ref_best_split_per_leaf[i])
ref_best_split_per_leaf[i] = *split;
}
}
if (!config->cegb_penalty_feature_lazy.empty()) {
