Skip to content

Commit

Permalink
update v_array for more vector-like syntax
Browse files Browse the repository at this point in the history
  • Loading branch information
JohnLangford committed Apr 23, 2018
1 parent 5e25d1d commit fe480f2
Show file tree
Hide file tree
Showing 38 changed files with 155 additions and 157 deletions.
3 changes: 1 addition & 2 deletions library/search_generate.cc
Expand Up @@ -308,7 +308,7 @@ class Generator : public SearchTask<input, output>
}
ex("w=" + tmp);

ref.erase();
ref.clear();

/*
vector<char>& best = ied.next();
Expand Down Expand Up @@ -495,4 +495,3 @@ int main(int argc, char *argv[])
//predict();
//run_easy();
}

2 changes: 1 addition & 1 deletion vowpalwabbit/autolink.cc
Expand Up @@ -30,7 +30,7 @@ void predict_or_learn(autolink& b, LEARNER::single_learner& base, example& ec)
base.predict(ec);

ec.total_sum_feat_sq -= fs.sum_feat_sq;
fs.erase();
fs.clear();
ec.indices.pop();
}

Expand Down
8 changes: 4 additions & 4 deletions vowpalwabbit/bfgs.cc
Expand Up @@ -668,7 +668,7 @@ int process_pass(vw& all, bfgs& b)
b.net_time = (int) (1000.0 * (b.t_end_global.time - b.t_start_global.time) + (b.t_end_global.millitm - b.t_start_global.millitm));
if (!all.quiet)
fprintf(stderr, "%-10s\t%-10.5f\t%-.5f\n", "", d_mag, b.step_size);
b.predictions.erase();
b.predictions.clear();
update_weight(all, b.step_size);
}
}
Expand Down Expand Up @@ -727,7 +727,7 @@ int process_pass(vw& all, bfgs& b)
fprintf(stderr, "%-10s\t%-10s\t(revise x %.1f)\t%-.5f\n",
"","",ratio,
new_step);
b.predictions.erase();
b.predictions.clear();
update_weight(all, (float)(-b.step_size+new_step));
b.step_size = (float)new_step;
zero_derivative(all);
Expand Down Expand Up @@ -775,7 +775,7 @@ int process_pass(vw& all, bfgs& b)
b.net_time = (int) (1000.0 * (b.t_end_global.time - b.t_start_global.time) + (b.t_end_global.millitm - b.t_start_global.millitm));
if (!all.quiet)
fprintf(stderr, "%-10s\t%-10.5f\t%-.5f\n", "", d_mag, b.step_size);
b.predictions.erase();
b.predictions.clear();
update_weight(all, b.step_size);
}
}
Expand Down Expand Up @@ -811,7 +811,7 @@ int process_pass(vw& all, bfgs& b)

float d_mag = direction_magnitude(all);

b.predictions.erase();
b.predictions.clear();
update_weight(all, b.step_size);
ftime(&b.t_end_global);
b.net_time = (int) (1000.0 * (b.t_end_global.time - b.t_start_global.time) + (b.t_end_global.millitm - b.t_start_global.millitm));
Expand Down
2 changes: 1 addition & 1 deletion vowpalwabbit/cache.cc
Expand Up @@ -51,7 +51,7 @@ size_t read_cached_tag(io_buf& cache, example* ae)
if (buf_read(cache, c, tag_size) < tag_size)
return 0;

ae->tag.erase();
ae->tag.clear();
push_many(ae->tag, c, tag_size);
return tag_size+sizeof(tag_size);
}
Expand Down
6 changes: 3 additions & 3 deletions vowpalwabbit/cb.cc
Expand Up @@ -28,7 +28,7 @@ bool is_test_label(CB::label& ld)
char* bufread_label(CB::label* ld, char* c, io_buf& cache)
{
size_t num = *(size_t *)c;
ld->costs.erase();
ld->costs.clear();
c += sizeof(size_t);
size_t total = sizeof(cb_class)*num;
if (buf_read(cache, c, total) < total)
Expand All @@ -49,7 +49,7 @@ char* bufread_label(CB::label* ld, char* c, io_buf& cache)
size_t read_cached_label(shared_data*, void* v, io_buf& cache)
{
CB::label* ld = (CB::label*) v;
ld->costs.erase();
ld->costs.clear();
char *c;
size_t total = sizeof(size_t);
if (buf_read(cache, c, total) < total)
Expand Down Expand Up @@ -87,7 +87,7 @@ void cache_label(void* v, io_buf& cache)
void default_label(void* v)
{
CB::label* ld = (CB::label*) v;
ld->costs.erase();
ld->costs.clear();
}

void delete_label(void* v)
Expand Down
14 changes: 7 additions & 7 deletions vowpalwabbit/cb_explore.cc
Expand Up @@ -51,7 +51,7 @@ void predict_or_learn_first(cb_explore& data, single_learner& base, example& ec)
else
base.predict(ec);

probs.erase();
probs.clear();
if(data.tau > 0)
{
float prob = 1.f/(float)data.cbcs.num_actions;
Expand All @@ -77,7 +77,7 @@ void predict_or_learn_greedy(cb_explore& data, single_learner& base, example& ec
// TODO: pointers are copied here. What happens if base.learn/base.predict re-allocs?
// ec.pred.a_s = probs; will restore the than free'd memory
action_scores probs = ec.pred.a_s;
probs.erase();
probs.clear();

if (is_learn)
base.learn(ec);
Expand All @@ -98,7 +98,7 @@ void predict_or_learn_bag(cb_explore& data, single_learner& base, example& ec)
{
//Randomize over predictions from a base set of predictors
action_scores probs = ec.pred.a_s;
probs.erase();
probs.clear();

for(uint32_t i = 0; i < data.cbcs.num_actions; i++)
probs.push_back({i,0.});
Expand All @@ -123,7 +123,7 @@ void predict_or_learn_bag(cb_explore& data, single_learner& base, example& ec)
void get_cover_probabilities(cb_explore& data, single_learner& base, example& ec, v_array<action_score>& probs)
{
float additive_probability = 1.f / (float)data.cover_size;
data.preds.erase();
data.preds.clear();

for(uint32_t i = 0; i < data.cbcs.num_actions; i++)
probs.push_back({i,0.});
Expand Down Expand Up @@ -157,8 +157,8 @@ void predict_or_learn_cover(cb_explore& data, single_learner& base, example& ec)
uint32_t num_actions = data.cbcs.num_actions;

action_scores probs = ec.pred.a_s;
probs.erase();
data.cs_label.costs.erase();
probs.clear();
data.cs_label.costs.clear();

for (uint32_t j = 0; j < num_actions; j++)
data.cs_label.costs.push_back({FLT_MAX,j+1,0.,0.});
Expand All @@ -185,7 +185,7 @@ void predict_or_learn_cover(cb_explore& data, single_learner& base, example& ec)
//Now update oracles

//1. Compute loss vector
data.cs_label.costs.erase();
data.cs_label.costs.clear();
float norm = min_prob * num_actions;
ec.l.cb = data.cb_label;
data.cbcs.known_cost = get_observed_cost(data.cb_label);
Expand Down
8 changes: 4 additions & 4 deletions vowpalwabbit/cb_explore_adf.cc
Expand Up @@ -153,12 +153,12 @@ void predict_or_learn_bag(cb_explore_adf& data, multi_learner& base, multi_ex& e
num_actions--;
if (num_actions == 0)
{
preds.erase();
preds.clear();
return;
}

data.action_probs.resize(num_actions);
data.action_probs.erase();
data.action_probs.clear();
for (uint32_t i = 0; i < num_actions; i++)
data.action_probs.push_back({ i,0. });
vector<uint32_t>& top_actions = *data.top_actions;
Expand Down Expand Up @@ -218,7 +218,7 @@ void predict_or_learn_cover(cb_explore_adf& data, multi_learner& base, multi_ex&
float additive_probability = 1.f / (float)data.cover_size;
float min_prob = min(1.f / num_actions, 1.f / (float)sqrt(data.counter * num_actions));
v_array<action_score>& probs = data.action_probs;
probs.erase();
probs.clear();
for(uint32_t i = 0; i < num_actions; i++)
probs.push_back({i,0.});

Expand All @@ -232,7 +232,7 @@ void predict_or_learn_cover(cb_explore_adf& data, multi_learner& base, multi_ex&
//Create costs of each action based on online cover
if (is_learn)
{
data.cs_labels_2.costs.erase();
data.cs_labels_2.costs.clear();
if (shared > 0)
data.cs_labels_2.costs.push_back(data.cs_labels.costs[0]);
for (uint32_t j = 0; j < num_actions; j++)
Expand Down
4 changes: 2 additions & 2 deletions vowpalwabbit/cbify.cc
Expand Up @@ -101,7 +101,7 @@ void predict_or_learn(cbify& data, single_learner& base, example& ec)
{
//Store the multiclass input label
MULTICLASS::label_t ld = ec.l.multi;
data.cb_label.costs.erase();
data.cb_label.costs.clear();
ec.l.cb = data.cb_label;
ec.pred.a_s = data.a_s;

Expand All @@ -125,7 +125,7 @@ void predict_or_learn(cbify& data, single_learner& base, example& ec)
data.cb_label.costs.push_back(cl);
ec.l.cb = data.cb_label;
base.learn(ec);
data.a_s.erase();
data.a_s.clear();
data.a_s = ec.pred.a_s;
ec.l.multi = ld;
ec.pred.multiclass = chosen_action + 1;
Expand Down
8 changes: 4 additions & 4 deletions vowpalwabbit/cost_sensitive.cc
Expand Up @@ -42,7 +42,7 @@ bool is_test_label(label& ld)
char* bufread_label(label* ld, char* c, io_buf& cache)
{
size_t num = *(size_t *)c;
ld->costs.erase();
ld->costs.clear();
c += sizeof(size_t);
size_t total = sizeof(wclass)*num;
if (buf_read(cache, c, (int)total) < total)
Expand All @@ -63,7 +63,7 @@ char* bufread_label(label* ld, char* c, io_buf& cache)
size_t read_cached_label(shared_data*, void* v, io_buf& cache)
{
label* ld = (label*) v;
ld->costs.erase();
ld->costs.clear();
char *c;
size_t total = sizeof(size_t);
if (buf_read(cache, c, (int)total) < total)
Expand Down Expand Up @@ -101,7 +101,7 @@ void cache_label(void* v, io_buf& cache)
void default_label(void* v)
{
label* ld = (label*) v;
ld->costs.erase();
ld->costs.clear();
}

void delete_label(void* v)
Expand Down Expand Up @@ -131,7 +131,7 @@ bool substring_eq(substring ss, const char* str)
void parse_label(parser* p, shared_data*sd, void* v, v_array<substring>& words)
{
label* ld = (label*)v;
ld->costs.erase();
ld->costs.clear();

// handle shared and label first
if (words.size() == 1)
Expand Down
8 changes: 4 additions & 4 deletions vowpalwabbit/csoaa.cc
Expand Up @@ -237,7 +237,7 @@ void unsubtract_example(example *ec)
features& fs = ec->feature_space[wap_ldf_namespace];
ec->num_features -= fs.size();
ec->total_sum_feat_sq -= fs.sum_feat_sq;
fs.erase();
fs.clear();
ec->indices.decr();
}

Expand Down Expand Up @@ -445,8 +445,8 @@ void do_actual_learning(ldf& data, single_learner& base, multi_ex& ec_seq_all)
uint32_t predicted_K = start_K;
if(data.rank)
{
data.a_s.erase();
data.stored_preds.erase();
data.a_s.clear();
data.stored_preds.clear();
if (start_K > 0)
data.stored_preds.push_back(ec_seq[0]->pred.a_s);
for (uint32_t k=start_K; k<K; k++)
Expand Down Expand Up @@ -486,7 +486,7 @@ void do_actual_learning(ldf& data, single_learner& base, multi_ex& ec_seq_all)

if(data.rank)
{
data.stored_preds[0].erase();
data.stored_preds[0].clear();
if (start_K > 0)
{
ec_seq[0]->pred.a_s = data.stored_preds[0];
Expand Down
2 changes: 1 addition & 1 deletion vowpalwabbit/ect.cc
Expand Up @@ -236,7 +236,7 @@ void ect_train(ect& e, single_learner& base, example& ec)

simple_temp.initial = 0.;

e.tournaments_won.erase();
e.tournaments_won.clear();

uint32_t id = e.directions[mc.label - 1].winner;
bool left = e.directions[id].left == mc.label - 1;
Expand Down
14 changes: 7 additions & 7 deletions vowpalwabbit/ezexample.h
Expand Up @@ -36,10 +36,10 @@ class ezexample
example* get_new_example()
{ example* new_ec = VW::new_unused_example(*vw_par_ref);
vw_par_ref->p->lp.default_label(&new_ec->l);
new_ec->tag.erase();
new_ec->indices.erase();
new_ec->tag.clear();
new_ec->indices.clear();
for (size_t i=0; i<256; i++)
new_ec->feature_space[i].erase();
new_ec->feature_space[i].clear();

new_ec->ft_offset = 0;
new_ec->num_features = 0;
Expand Down Expand Up @@ -117,7 +117,7 @@ class ezexample
for (auto ecc : example_copies)
if (ecc->in_use && VW::is_ring_example(*vw_par_ref, ec))
VW::finish_example(*vw_par_ref, *ecc);
example_copies.erase();
example_copies.clear();
free(example_copies.begin());
}

Expand All @@ -132,7 +132,7 @@ class ezexample
void addns(char c)
{ if (ensure_ns_exists(c)) return;

ec->feature_space[(int)c].erase();
ec->feature_space[(int)c].clear();
past_seeds.push_back(current_seed);
current_ns = c;
str[0] = c;
Expand All @@ -147,7 +147,7 @@ class ezexample
else
{ if (ns_exists[(int)current_ns])
{ ec->total_sum_feat_sq -= ec->feature_space[(int)current_ns].sum_feat_sq;
ec->feature_space[(int)current_ns].erase();
ec->feature_space[(int)current_ns].clear();
ec->num_features -= ec->feature_space[(int)current_ns].size();

ns_exists[(int)current_ns] = false;
Expand Down Expand Up @@ -269,7 +269,7 @@ class ezexample
for (auto ecc : example_copies)
if (ecc->in_use)
VW::finish_example(*vw_par_ref, *ecc);
example_copies.erase();
example_copies.clear();
}
}

Expand Down
11 changes: 5 additions & 6 deletions vowpalwabbit/feature_group.h
Expand Up @@ -251,7 +251,7 @@ struct features
}

// if one wants to add proper destructor for features, make sure to update ezexample_predict::~ezexample_predict();
// ~features() { ... }
// ~features() { ... }

inline size_t size() const { return values.size(); }

Expand All @@ -271,11 +271,11 @@ struct features

iterator end() { return iterator(values.end(), indicies.end()); }

void erase()
void clear()
{ sum_feat_sq = 0.f;
values.erase();
indicies.erase();
space_names.erase();
values.clear();
indicies.clear();
space_names.clear();
}

void truncate_to(const features_value_iterator& pos)
Expand Down Expand Up @@ -351,4 +351,3 @@ struct features
sum_feat_sq = src.sum_feat_sq;
}
};

2 changes: 1 addition & 1 deletion vowpalwabbit/gd_mf.cc
Expand Up @@ -104,7 +104,7 @@ template<class T> float mf_predict(gdmf& d, example& ec, T& weights)
}

// clear stored predictions
d.scalars.erase();
d.scalars.clear();

float linear_prediction = 0.;
// linear terms
Expand Down

0 comments on commit fe480f2

Please sign in to comment.