Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: run clang-format over codebase #3610

Merged
merged 5 commits into from
Jan 14, 2022
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
5 changes: 1 addition & 4 deletions vowpalwabbit/allreduce_threads.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,7 @@ AllReduceSync::AllReduceSync(const size_t total) : m_total(total), m_count(0), m
buffers = new void*[total];
}

AllReduceSync::~AllReduceSync()
{
delete[] buffers;
}
AllReduceSync::~AllReduceSync() { delete[] buffers; }

void AllReduceSync::waitForSynchronization()
{
Expand Down
43 changes: 20 additions & 23 deletions vowpalwabbit/beam.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,14 @@ struct beam_element
{
uint32_t hash; // a cached hash value -- if a ~= b then h(a) must== h(b)
float cost; // cost of this element
T *data; // pointer to element data -- rarely accessed!
T* data; // pointer to element data -- rarely accessed!
bool active; // is this currently active
// bool recombined; // if we're not the BEST then we've been recombined
// v_array<T*> * recomb_friends; // if we're the BEST (among ~= elements), then recomb_friends is everything that's
// equivalent to us but worse... NOT USED if we're not doing k-best predictions
};

inline int compare_on_cost(const void *void_a, const void *void_b)
inline int compare_on_cost(const void* void_a, const void* void_b)
{
if (void_a == void_b) return 0;
const beam_element<void>* a = static_cast<const beam_element<void>*>(void_a);
Expand All @@ -47,7 +47,7 @@ inline int compare_on_cost(const void *void_a, const void *void_b)
return 0;
}

inline int compare_on_hash_then_cost(const void *void_a, const void *void_b)
inline int compare_on_hash_then_cost(const void* void_a, const void* void_b)
{
if (void_a == void_b) return 0;
const beam_element<void>* a = static_cast<const beam_element<void>*>(void_a);
Expand Down Expand Up @@ -81,17 +81,17 @@ class beam
float worst_cost; // what is the cost of the worst (highest cost) item in the beam
float best_cost; // what is the cost of the best (lowest cost) item in the beam
float prune_if_gt; // prune any element with cost greater than this
T *best_cost_data; // easy access to best-cost item
T* best_cost_data; // easy access to best-cost item
bool do_kbest;
v_array<beam_element<T>> A; // the actual data
// v_array<v_array<beam_element<T>*>> recomb_buckets;

// static size_t NUM_RECOMB_BUCKETS = 10231;

bool (*is_equivalent)(T *, T *); // test if two items are equivalent; nullptr means don't do hypothesis recombination
bool (*is_equivalent)(T*, T*); // test if two items are equivalent; nullptr means don't do hypothesis recombination

public:
beam(size_t beam_size, float prune_coeff = FLT_MAX, bool (*test_equiv)(T *, T *) = nullptr, bool kbest = false)
beam(size_t beam_size, float prune_coeff = FLT_MAX, bool (*test_equiv)(T*, T*) = nullptr, bool kbest = false)
: beam_size(beam_size), pruning_coefficient(prune_coeff), do_kbest(kbest), is_equivalent(test_equiv)
{
count = 0;
Expand All @@ -108,7 +108,7 @@ class beam

inline bool might_insert(float cost) { return (cost <= prune_if_gt) && ((count < beam_size) || (cost < worst_cost)); }

bool insert(T *data, float cost, uint32_t hash) // returns TRUE iff element was actually added
bool insert(T* data, float cost, uint32_t hash) // returns TRUE iff element was actually added
{
if (!might_insert(cost)) return false;

Expand Down Expand Up @@ -175,21 +175,21 @@ class beam
return true;
}

beam_element<T> *get_best_item()
beam_element<T>* get_best_item()
{
if (count == 0) return nullptr;
beam_element<T> *ret = A.begin;
beam_element<T>* ret = A.begin;
while ((ret != A.end) && (!ret->active)) ++ret;
return (ret == A.end) ? nullptr : ret;
}

beam_element<T> *pop_best_item()
beam_element<T>* pop_best_item()
{
if (count == 0) return nullptr;

beam_element<T> *ret = nullptr;
beam_element<T>* ret = nullptr;
float next_best_cost = FLT_MAX;
for (beam_element<T> *el = A.begin; el != A.end; el++)
for (beam_element<T>* el = A.begin; el != A.end; el++)
if ((ret == nullptr) && el->active && (el->cost <= best_cost))
ret = el;
else if (el->active && (el->cost < next_best_cost))
Expand Down Expand Up @@ -250,7 +250,7 @@ class beam
}
}

void compact(void (*free_data)(T *) = nullptr)
void compact(void (*free_data)(T*) = nullptr)
{
if (is_equivalent) do_recombination();
qsort(A.begin, A.size(), sizeof(beam_element<T>), compare_on_cost); // TODO: quick select
Expand All @@ -262,7 +262,7 @@ class beam
while ((count > 1) && !A[count - 1].active) count--;

if (free_data)
for (beam_element<T> *be = A.begin + count; be != A.end; ++be) free_data(be->data);
for (beam_element<T>* be = A.begin + count; be != A.end; ++be) free_data(be->data);

A.end = A.begin + count;

Expand All @@ -272,15 +272,15 @@ class beam
best_cost_data = A[0].data;
}

void maybe_compact(void (*free_data)(T *) = nullptr)
void maybe_compact(void (*free_data)(T*) = nullptr)
{
if (count >= beam_size * 10) compact(free_data);
}

void erase(void (*free_data)(T *) = nullptr)
void erase(void (*free_data)(T*) = nullptr)
{
if (free_data)
for (beam_element<T> *be = A.begin; be != A.end; ++be) free_data(be->data);
for (beam_element<T>* be = A.begin; be != A.end; ++be) free_data(be->data);
A.erase();
count = 0;
worst_cost = -FLT_MAX;
Expand All @@ -289,13 +289,10 @@ class beam
best_cost_data = nullptr;
}

~beam()
{
assert(A.size() == 0);
}
~beam() { assert(A.size() == 0); }

beam_element<T> *begin() { return A.begin; }
beam_element<T> *end() { return A.end; }
beam_element<T>* begin() { return A.begin; }
beam_element<T>* end() { return A.end; }
size_t size() { return count; }
bool empty() { return A.empty(); }
size_t get_beam_size() { return beam_size; }
Expand Down
25 changes: 6 additions & 19 deletions vowpalwabbit/boosting.cc
Original file line number Diff line number Diff line change
Expand Up @@ -297,20 +297,14 @@ void save_load_sampling(boosting& o, io_buf& model_file, bool read, bool text)

// avoid making syscalls multiple times
fmt::memory_buffer buffer;
if (read)
{
fmt::format_to(buffer, "Loading alpha and v: \n");
}
if (read) { fmt::format_to(buffer, "Loading alpha and v: \n"); }
else
{
fmt::format_to(buffer, "Saving alpha and v, current weighted_examples = {}\n",
o.all->sd->weighted_labeled_examples + o.all->sd->weighted_unlabeled_examples);
o.all->sd->weighted_labeled_examples + o.all->sd->weighted_unlabeled_examples);
}

for (int i = 0; i < o.N; i++)
{
fmt::format_to(buffer, "{0} {1}\n", o.alpha[i], o.v[i]);
}
for (int i = 0; i < o.N; i++) { fmt::format_to(buffer, "{0} {1}\n", o.alpha[i], o.v[i]); }
o.logger.err_info("{}", fmt::to_string(buffer));
}

Expand Down Expand Up @@ -347,20 +341,13 @@ void save_load(boosting& o, io_buf& model_file, bool read, bool text)
{
// avoid making syscalls multiple times
fmt::memory_buffer buffer;
if (read)
{
fmt::format_to(buffer, "Loading alpha: \n");
}
if (read) { fmt::format_to(buffer, "Loading alpha: \n"); }
else
{
fmt::format_to(buffer, "Saving alpha, current weighted_examples = {}\n",
o.all->sd->weighted_examples());
fmt::format_to(buffer, "Saving alpha, current weighted_examples = {}\n", o.all->sd->weighted_examples());
}

for (int i = 0; i < o.N; i++)
{
fmt::format_to(buffer, "{} \n", o.alpha[i]);
}
for (int i = 0; i < o.N; i++) { fmt::format_to(buffer, "{} \n", o.alpha[i]); }
o.logger.err_info("{}", fmt::to_string(buffer));
}
}
Expand Down
14 changes: 7 additions & 7 deletions vowpalwabbit/cb_dro.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ struct cb_dro_data
bool isValid() { return chisq.isValid(); }

template <bool is_learn, bool is_explore>
inline void learn_or_predict(multi_learner &base, multi_ex &examples)
inline void learn_or_predict(multi_learner& base, multi_ex& examples)
{
// Some explanation required.
//
Expand All @@ -39,7 +39,7 @@ struct cb_dro_data
if (is_learn)
{
const auto it =
std::find_if(examples.begin(), examples.end(), [](example *item) { return !item->l.cb.costs.empty(); });
std::find_if(examples.begin(), examples.end(), [](example* item) { return !item->l.cb.costs.empty(); });

if (it != examples.end())
{
Expand All @@ -53,7 +53,7 @@ struct cb_dro_data

const auto maxit = is_explore
? std::max_element(action_scores.begin(), action_scores.end(),
[](const ACTION_SCORE::action_score &a, const ACTION_SCORE::action_score &b) {
[](const ACTION_SCORE::action_score& a, const ACTION_SCORE::action_score& b) {
return ACTION_SCORE::score_comp(&a, &b) < 0;
})
: action_scores.begin();
Expand All @@ -73,16 +73,16 @@ struct cb_dro_data
save_weight.clear();
save_weight.reserve(examples.size());
std::transform(examples.cbegin(), examples.cend(), std::back_inserter(save_weight),
[](example *item) { return item->weight; });
std::for_each(examples.begin(), examples.end(), [qlb](example *item) { item->weight *= qlb; });
[](example* item) { return item->weight; });
std::for_each(examples.begin(), examples.end(), [qlb](example* item) { item->weight *= qlb; });

// TODO: make sure descendants "do the right thing" with example->weight
multiline_learn_or_predict<true>(base, examples, examples[0]->ft_offset);

// restore the original weights
auto save_weight_it = save_weight.begin();
std::for_each(
examples.begin(), examples.end(), [&save_weight_it](example *item) { item->weight = *save_weight_it++; });
examples.begin(), examples.end(), [&save_weight_it](example* item) { item->weight = *save_weight_it++; });
}
}
}
Expand All @@ -94,7 +94,7 @@ struct cb_dro_data
} // namespace VW

template <bool is_learn, bool is_explore>
void learn_or_predict(cb_dro_data &data, multi_learner &base, multi_ex &examples)
void learn_or_predict(cb_dro_data& data, multi_learner& base, multi_ex& examples)
{
data.learn_or_predict<is_learn, is_explore>(base, examples);
}
Expand Down
10 changes: 5 additions & 5 deletions vowpalwabbit/cb_explore_adf_bag.cc
Original file line number Diff line number Diff line change
Expand Up @@ -47,10 +47,10 @@ struct cb_explore_adf_bag
float epsilon, size_t bag_size, bool greedify, bool first_only, std::shared_ptr<VW::rand_state> random_state);

// Should be called through cb_explore_adf_base for pre/post-processing
void predict(VW::LEARNER::multi_learner &base, multi_ex &examples);
void learn(VW::LEARNER::multi_learner &base, multi_ex &examples);
void predict(VW::LEARNER::multi_learner& base, multi_ex& examples);
void learn(VW::LEARNER::multi_learner& base, multi_ex& examples);

const PredictionT &get_cached_prediction() { return _action_probs; };
const PredictionT& get_cached_prediction() { return _action_probs; };

private:
uint32_t get_bag_learner_update_count(uint32_t learner_index);
Expand All @@ -76,7 +76,7 @@ uint32_t cb_explore_adf_bag::get_bag_learner_update_count(uint32_t learner_index
return BS::weight_gen(_random_state);
}

void cb_explore_adf_bag::predict(VW::LEARNER::multi_learner &base, multi_ex &examples)
void cb_explore_adf_bag::predict(VW::LEARNER::multi_learner& base, multi_ex& examples)
{
// Randomize over predictions from a base set of predictors
v_array<ACTION_SCORE::action_score>& preds = examples[0]->pred.a_s;
Expand Down Expand Up @@ -118,7 +118,7 @@ void cb_explore_adf_bag::predict(VW::LEARNER::multi_learner &base, multi_ex &exa
std::copy(std::begin(_action_probs), std::end(_action_probs), std::begin(preds));
}

void cb_explore_adf_bag::learn(VW::LEARNER::multi_learner &base, multi_ex &examples)
void cb_explore_adf_bag::learn(VW::LEARNER::multi_learner& base, multi_ex& examples)
{
for (uint32_t i = 0; i < _bag_size; i++)
{
Expand Down
12 changes: 6 additions & 6 deletions vowpalwabbit/cb_sample.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ struct cb_sample_data
explicit cb_sample_data(std::shared_ptr<VW::rand_state>&& random_state) : _random_state(random_state) {}

template <bool is_learn>
inline void learn_or_predict(multi_learner &base, multi_ex &examples)
inline void learn_or_predict(multi_learner& base, multi_ex& examples)
{
// If base.learn() does not return prediction then we need to predict first
// so that there is something to sample from
Expand All @@ -39,13 +39,13 @@ struct cb_sample_data

multiline_learn_or_predict<is_learn>(base, examples, examples[0]->ft_offset);

auto &action_scores = examples[0]->pred.a_s;
auto& action_scores = examples[0]->pred.a_s;

uint32_t chosen_action = 0;
int64_t maybe_labelled_action = -1;

// Find that chosen action in the learning case, skip the shared example.
auto it = std::find_if(examples.begin(), examples.end(), [](example *item) { return !item->l.cb.costs.empty(); });
auto it = std::find_if(examples.begin(), examples.end(), [](example* item) { return !item->l.cb.costs.empty(); });
if (it != examples.end()) { maybe_labelled_action = static_cast<int64_t>(std::distance(examples.begin(), it)); }

// If we are learning and have a label, then take that action as the chosen action. Otherwise sample the
Expand All @@ -57,7 +57,7 @@ struct cb_sample_data
// This only matters if the prediction decided to explore, but the same output should happen for the learn case.
for (size_t i = 0; i < action_scores.size(); i++)
{
auto &a_s = action_scores[i];
auto& a_s = action_scores[i];
if (a_s.action == static_cast<uint32_t>(maybe_labelled_action))
{
chosen_action = static_cast<uint32_t>(i);
Expand Down Expand Up @@ -92,7 +92,7 @@ struct cb_sample_data
_UNUSED(result);
}

std::string cb_decision_to_string(const ACTION_SCORE::action_scores &action_scores)
std::string cb_decision_to_string(const ACTION_SCORE::action_scores& action_scores)
{
std::ostringstream ostrm;
if (action_scores.empty()) return "";
Expand All @@ -106,7 +106,7 @@ struct cb_sample_data
} // namespace VW

template <bool is_learn>
void learn_or_predict(cb_sample_data &data, multi_learner &base, multi_ex &examples)
void learn_or_predict(cb_sample_data& data, multi_learner& base, multi_ex& examples)
{
data.learn_or_predict<is_learn>(base, examples);
}
Expand Down
5 changes: 3 additions & 2 deletions vowpalwabbit/ccb_reduction_features.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@

#include <cstdint>

namespace CCB {
namespace CCB
{
enum class example_type : uint8_t
{
unset = 0,
Expand All @@ -23,7 +24,7 @@ struct reduction_features
v_array<uint32_t> explicit_included_actions;
void clear() { explicit_included_actions.clear(); }
};
}
} // namespace CCB

namespace VW
{
Expand Down
10 changes: 5 additions & 5 deletions vowpalwabbit/constant.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,11 @@ constexpr unsigned char nn_output_namespace = 129;
constexpr unsigned char autolink_namespace = 130;
constexpr unsigned char neighbor_namespace =
131; // this is \x83 -- to do quadratic, say "-q a`printf "\x83"` on the command line
constexpr unsigned char affix_namespace = 132; // this is \x84
constexpr unsigned char spelling_namespace = 133; // this is \x85
constexpr unsigned char conditioning_namespace = 134; // this is \x86
constexpr unsigned char dictionary_namespace = 135; // this is \x87
constexpr unsigned char node_id_namespace = 136; // this is \x88
constexpr unsigned char affix_namespace = 132; // this is \x84
constexpr unsigned char spelling_namespace = 133; // this is \x85
constexpr unsigned char conditioning_namespace = 134; // this is \x86
constexpr unsigned char dictionary_namespace = 135; // this is \x87
constexpr unsigned char node_id_namespace = 136; // this is \x88
constexpr unsigned char baseline_enabled_message_namespace = 137; // this is \x89
constexpr unsigned char ccb_slot_namespace = 139;
constexpr unsigned char ccb_id_namespace = 140;
Expand Down
5 changes: 1 addition & 4 deletions vowpalwabbit/continuous_actions_reduction_features.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,7 @@ struct reduction_features
bool is_chosen_action_set() const { return !std::isnan(chosen_action); }
bool is_pdf_set() const { return pdf.size() > 0; }

reduction_features()
{
chosen_action = std::numeric_limits<float>::quiet_NaN();
}
reduction_features() { chosen_action = std::numeric_limits<float>::quiet_NaN(); }

void clear()
{
Expand Down
1 change: 0 additions & 1 deletion vowpalwabbit/cost_sensitive.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@

#include "io/logger.h"


namespace COST_SENSITIVE
{
void name_value(VW::string_view s, std::vector<VW::string_view>& name, float& v, VW::io::logger& logger)
Expand Down