resolve MSVC C4456 and C4459 #2511

Merged 4 commits on Jul 16, 2020
Changes from 2 commits
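The changes in this PR are mechanical renames (or removals of redundant redeclarations) that silence two MSVC shadowing warnings: C4456 ("declaration of 'identifier' hides previous local declaration", e.g. an inner loop reusing an outer loop's index variable) and C4459 ("declaration of 'identifier' hides global declaration", e.g. a local or parameter reusing a file-scope name). A minimal sketch of both warnings and the rename fix — illustrative code, not from this repository:

```cpp
#include <cstddef>

static float bias = 0.5f;  // file-scope name

float before(const float* xs, std::size_t n)
{
  float sum = 0.f;
  for (std::size_t i = 0; i < n; i++)
  {
    float bias = xs[i];                      // C4459: hides the global 'bias'
    for (std::size_t i = 0; i + 1 < n; i++)  // C4456: hides the outer 'i'
      sum += bias * xs[i + 1];
  }
  return sum;
}

float after(const float* xs, std::size_t n)
{
  float sum = 0.f;
  for (std::size_t i = 0; i < n; i++)
  {
    float x_bias = xs[i];                    // renamed: no C4459
    for (std::size_t j = 0; j + 1 < n; j++)  // renamed: no C4456
      sum += x_bias * xs[j + 1];
  }
  return sum;
}
```

Renaming is preferable to a blanket `#pragma warning(disable : 4456)` because shadowing can mask real bugs, such as reading or writing the wrong variable.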
10 changes: 5 additions & 5 deletions explore/explore_internal.h
@@ -14,10 +14,10 @@

namespace exploration
{
-const uint64_t a = 0xeece66d5deece66dULL;
-const uint64_t c = 2147483647;
+const uint64_t CONSTANT_A = 0xeece66d5deece66dULL;
+const uint64_t CONSTANT_C = 2147483647;

-const int bias = 127 << 23u;
+const int BIAS = 127 << 23u;

union int_float
{
@@ -28,9 +28,9 @@ namespace exploration
// uniform random between 0 and 1
inline float uniform_random_merand48(uint64_t initial)
{
-initial = a * initial + c;
+initial = CONSTANT_A * initial + CONSTANT_C;
int_float temp;
-temp.i = ((initial >> 25) & 0x7FFFFF) | bias;
+temp.i = ((initial >> 25) & 0x7FFFFF) | BIAS;
return temp.f - 1;
}

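For context on the constants renamed above: `uniform_random_merand48` advances a 64-bit linear congruential generator, then builds a float directly from the state's bits. OR-ing 23 pseudo-random bits into the mantissa of `BIAS` (`127 << 23` is the bit pattern of `1.0f`) yields a float in [1, 2), so subtracting 1 gives a uniform value in [0, 1). A standalone sketch of the same trick, with `std::memcpy` standing in for the `int_float` union to sidestep type-punning questions:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Same recurrence and bit trick as uniform_random_merand48.
float merand48_sketch(uint64_t& state)
{
  state = 0xeece66d5deece66dULL * state + 2147483647ULL;  // LCG step (CONSTANT_A, CONSTANT_C)
  // 23 bits of state go into the mantissa; the exponent bits of 1.0f sit on top.
  uint32_t bits = static_cast<uint32_t>(((state >> 25) & 0x7FFFFF) | (127u << 23));
  float f;
  std::memcpy(&f, &bits, sizeof f);  // f is uniform in [1, 2)
  return f - 1.f;                    // shift to [0, 1)
}

int main()
{
  uint64_t seed = 42;
  for (int i = 0; i < 3; i++) std::cout << merand48_sketch(seed) << '\n';
}
```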
22 changes: 11 additions & 11 deletions vowpalwabbit/OjaNewton.cc
@@ -97,12 +97,12 @@ struct OjaNewton
{
for (int k = 1; k <= j - 1; k++)
{
-double tmp = 0;
+double temp = 0;

for (uint32_t i = 0; i < length; i++)
-tmp += ((double)(&(weights.strided_index(i)))[j]) * (&(weights.strided_index(i)))[k];
+temp += ((double)(&(weights.strided_index(i)))[j]) * (&(weights.strided_index(i)))[k];
for (uint32_t i = 0; i < length; i++)
-(&(weights.strided_index(i)))[j] -= (float)tmp * (&(weights.strided_index(i)))[k];
+(&(weights.strided_index(i)))[j] -= (float)temp * (&(weights.strided_index(i)))[k];
}
double norm = 0;
for (uint32_t i = 0; i < length; i++)
@@ -129,15 +129,15 @@ struct OjaNewton
for (int i = 1; i <= m; i++)
{
float gamma = fmin(learning_rate_cnt / t, 1.f);
-float tmp = data.AZx[i] * data.sketch_cnt;
+float temp = data.AZx[i] * data.sketch_cnt;

if (t == 1)
{
-ev[i] = gamma * tmp * tmp;
+ev[i] = gamma * temp * temp;
}
else
{
-ev[i] = (1 - gamma) * t * ev[i] / (t - 1) + gamma * t * tmp * tmp;
+ev[i] = (1 - gamma) * t * ev[i] / (t - 1) + gamma * t * temp * temp;
}
}
}
@@ -165,14 +165,14 @@ struct OjaNewton

void update_K()
{
-float tmp = data.norm2_x * data.sketch_cnt * data.sketch_cnt;
+float temp = data.norm2_x * data.sketch_cnt * data.sketch_cnt;
for (int i = 1; i <= m; i++)
{
for (int j = 1; j <= m; j++)
{
K[i][j] += data.delta[i] * data.Zx[j] * data.sketch_cnt;
K[i][j] += data.delta[j] * data.Zx[i] * data.sketch_cnt;
-K[i][j] += data.delta[i] * data.delta[j] * tmp;
+K[i][j] += data.delta[i] * data.delta[j] * temp;
}
}
}
@@ -230,12 +230,12 @@ struct OjaNewton
{
for (int j = 1; j <= m; j++)
{
-float tmp = 0;
+float temp = 0;
for (int i = j; i <= m; i++)
{
-tmp += ev[i] * data.AZx[i] * A[i][j] / (alpha * (alpha + ev[i]));
+temp += ev[i] * data.AZx[i] * A[i][j] / (alpha * (alpha + ev[i]));
}
-b[j] += tmp * data.g;
+b[j] += temp * data.g;
}
}

2 changes: 1 addition & 1 deletion vowpalwabbit/cb_explore_adf_bag.cc
@@ -90,7 +90,7 @@ void cb_explore_adf_bag::predict_or_learn_impl(VW::LEARNER::multi_learner& base,
if (!_first_only)
{
size_t tied_actions = fill_tied(preds);
-for (size_t i = 0; i < tied_actions; ++i) _top_actions[preds[i].action] += 1.f / tied_actions;
+for (size_t j = 0; j < tied_actions; ++j) _top_actions[preds[j].action] += 1.f / tied_actions;
}
else
_top_actions[preds[0].action] += 1.f;
10 changes: 5 additions & 5 deletions vowpalwabbit/cb_explore_adf_cover.cc
@@ -129,18 +129,18 @@ void cb_explore_adf_cover::predict_or_learn_impl(VW::LEARNER::multi_learner& bas
GEN_CS::call_cs_ldf<false>(
*(_cs_ldf_learner), examples, _cb_labels, _cs_labels, _prepped_cs_labels, examples[0]->ft_offset, i + 1);

-for (uint32_t i = 0; i < num_actions; i++) _scores[i] += preds[i].score;
+for (uint32_t j = 0; j < num_actions; j++) _scores[j] += preds[j].score;
if (!_first_only)
{
size_t tied_actions = fill_tied(preds);
const float add_prob = additive_probability / tied_actions;
-for (size_t i = 0; i < tied_actions; ++i)
+for (size_t j = 0; j < tied_actions; ++j)
{
-if (_action_probs[preds[i].action].score < min_prob)
-norm += (std::max)(0.f, add_prob - (min_prob - _action_probs[preds[i].action].score));
+if (_action_probs[preds[j].action].score < min_prob)
+norm += (std::max)(0.f, add_prob - (min_prob - _action_probs[preds[j].action].score));
else
norm += add_prob;
-_action_probs[preds[i].action].score += add_prob;
+_action_probs[preds[j].action].score += add_prob;
}
}
else
26 changes: 13 additions & 13 deletions vowpalwabbit/csoaa.cc
@@ -251,13 +251,13 @@ void unsubtract_example(example* ec)
void make_single_prediction(ldf& data, single_learner& base, example& ec)
{
COST_SENSITIVE::label ld = ec.l.cs;
-label_data simple_label;
-simple_label.initial = 0.;
-simple_label.label = FLT_MAX;
+label_data simple_lbl;
+simple_lbl.initial = 0.;
+simple_lbl.label = FLT_MAX;

LabelDict::add_example_namespace_from_memory(data.label_features, ec, ld.costs[0].class_index);

-ec.l.simple = simple_label;
+ec.l.simple = simple_lbl;
uint64_t old_offset = ec.ft_offset;
ec.ft_offset = data.ft_offset;
base.predict(ec); // make a prediction
@@ -302,7 +302,7 @@ void do_actual_learning_wap(ldf& data, single_learner& base, multi_ex& ec_seq)

// save original variables
COST_SENSITIVE::label save_cs_label = ec1->l.cs;
-label_data& simple_label = ec1->l.simple;
+label_data& simple_lbl = ec1->l.simple;

v_array<COST_SENSITIVE::wclass> costs1 = save_cs_label.costs;
if (costs1[0].class_index == (uint32_t)-1)
@@ -336,8 +336,8 @@ void do_actual_learning_wap(ldf& data, single_learner& base, multi_ex& ec_seq)
LabelDict::add_example_namespace_from_memory(data.label_features, *ec2, costs2[0].class_index);
float old_weight = ec1->weight;
uint64_t old_offset = ec1->ft_offset;
-simple_label.initial = 0.;
-simple_label.label = (costs1[0].x < costs2[0].x) ? -1.0f : 1.0f;
+simple_lbl.initial = 0.;
+simple_lbl.label = (costs1[0].x < costs2[0].x) ? -1.0f : 1.0f;
ec1->weight = value_diff;
ec1->partial_prediction = 0.;
subtract_example(*data.all, ec1, ec2);
@@ -382,26 +382,26 @@ void do_actual_learning_oaa(ldf& data, single_learner& base, multi_ex& ec_seq)
const auto& costs = save_cs_label.costs;

// build example for the base learner
-label_data simple_label;
+label_data simple_lbl;

-simple_label.initial = 0.;
+simple_lbl.initial = 0.;
float old_weight = ec->weight;
if (!data.treat_as_classifier) // treat like regression
-simple_label.label = costs[0].x;
+simple_lbl.label = costs[0].x;
else // treat like classification
{
if (costs[0].x <= min_cost)
{
-simple_label.label = -1.;
+simple_lbl.label = -1.;
ec->weight = old_weight * (max_cost - min_cost);
}
else
{
-simple_label.label = 1.;
+simple_lbl.label = 1.;
ec->weight = old_weight * (costs[0].x - min_cost);
}
}
-ec->l.simple = simple_label;
+ec->l.simple = simple_lbl;

// Prepare examples for learning
LabelDict::add_example_namespace_from_memory(data.label_features, *ec, costs[0].class_index);
34 changes: 17 additions & 17 deletions vowpalwabbit/ect.cc
@@ -129,52 +129,52 @@ size_t create_circuit(ect& e, uint64_t max_label, uint64_t eliminations)
v_array<v_array<uint32_t>> new_tournaments = v_init<v_array<uint32_t>>();
tournaments = e.all_levels[level];

-for (size_t t = 0; t < tournaments.size(); t++)
+for (size_t i = 0; i < tournaments.size(); i++)
{
v_array<uint32_t> empty = v_init<uint32_t>();
new_tournaments.push_back(empty);
}

-for (size_t t = 0; t < tournaments.size(); t++)
+for (size_t i = 0; i < tournaments.size(); i++)
{
-for (size_t j = 0; j < tournaments[t].size() / 2; j++)
+for (size_t j = 0; j < tournaments[i].size() / 2; j++)
{
uint32_t id = node++;
-uint32_t left = tournaments[t][2 * j];
-uint32_t right = tournaments[t][2 * j + 1];
+uint32_t left = tournaments[i][2 * j];
+uint32_t right = tournaments[i][2 * j + 1];

-direction d = {id, t, 0, 0, left, right, false};
+direction d = {id, i, 0, 0, left, right, false};
e.directions.push_back(d);
uint32_t direction_index = (uint32_t)e.directions.size() - 1;
-if (e.directions[left].tournament == t)
+if (e.directions[left].tournament == i)
e.directions[left].winner = direction_index;
else
e.directions[left].loser = direction_index;
-if (e.directions[right].tournament == t)
+if (e.directions[right].tournament == i)
e.directions[right].winner = direction_index;
else
e.directions[right].loser = direction_index;
if (e.directions[left].last)
e.directions[left].winner = direction_index;

-if (tournaments[t].size() == 2 && (t == 0 || tournaments[t - 1].empty()))
+if (tournaments[i].size() == 2 && (i == 0 || tournaments[i - 1].empty()))
{
e.directions[direction_index].last = true;
-if (t + 1 < tournaments.size())
-new_tournaments[t + 1].push_back(id);
+if (i + 1 < tournaments.size())
+new_tournaments[i + 1].push_back(id);
else // winner eliminated.
e.directions[direction_index].winner = 0;
e.final_nodes.push_back((uint32_t)(e.directions.size() - 1));
}
else
-new_tournaments[t].push_back(id);
-if (t + 1 < tournaments.size())
-new_tournaments[t + 1].push_back(id);
+new_tournaments[i].push_back(id);
+if (i + 1 < tournaments.size())
+new_tournaments[i + 1].push_back(id);
else // loser eliminated.
e.directions[direction_index].loser = 0;
}
-if (tournaments[t].size() % 2 == 1)
-new_tournaments[t].push_back(tournaments[t].last());
+if (tournaments[i].size() % 2 == 1)
+new_tournaments[i].push_back(tournaments[i].last());
}
e.all_levels.push_back(new_tournaments);
level++;
@@ -285,7 +285,7 @@ void ect_train(ect& e, single_learner& base, example& ec)
{
for (uint32_t j = 0; j < e.tournaments_won.size() / 2; j++)
{
-bool left = e.tournaments_won[j * 2];
+left = e.tournaments_won[j * 2];
bool right = e.tournaments_won[j * 2 + 1];
if (left == right) // no query to do
e.tournaments_won[j] = left;
2 changes: 1 addition & 1 deletion vowpalwabbit/gd.cc
@@ -807,7 +807,7 @@ void save_load_online_state(
brw += model_file.bin_read_fixed((char*)buff, sizeof(buff[0]) * 3, "");
uint32_t stride = 1 << weights.stride_shift();
weight* v = &weights.strided_index(i);
-for (size_t i = 0; i < stride; i++) v[i] = buff[i];
+for (size_t j = 0; j < stride; j++) v[j] = buff[j];
}
} while (brw > 0);
else // write binary or text
4 changes: 2 additions & 2 deletions vowpalwabbit/interactions.cc
@@ -315,15 +315,15 @@ void eval_count_of_generated_ft(vw& all, example& ec, size_t& new_features_cnt,

if (!PROCESS_SELF_INTERACTIONS(fs.values[i]))
{
-for (size_t i = order_of_inter - 1; i > 0; --i) results[i] += results[i - 1] * x;
+for (size_t j = order_of_inter - 1; j > 0; --j) results[j] += results[j - 1] * x;

results[0] += x;
}
else
{
results[0] += x;

-for (size_t i = 1; i < order_of_inter; ++i) results[i] += results[i - 1] * x;
+for (size_t j = 1; j < order_of_inter; ++j) results[j] += results[j - 1] * x;

++cnt_ft_value_non_1;
}
21 changes: 10 additions & 11 deletions vowpalwabbit/lda_core.cc
@@ -593,37 +593,37 @@ float lda::powf(float x, float p)
}
}

-void lda::expdigammify(vw &all, float *gamma)
+void lda::expdigammify(vw &all_, float *gamma)
{
switch (mmode)
{
case USE_FAST_APPROX:
-ldamath::expdigammify<float, USE_FAST_APPROX>(all, gamma, underflow_threshold, 0.0f);
+ldamath::expdigammify<float, USE_FAST_APPROX>(all_, gamma, underflow_threshold, 0.0f);
break;
case USE_PRECISE:
-ldamath::expdigammify<float, USE_PRECISE>(all, gamma, underflow_threshold, 0.0f);
+ldamath::expdigammify<float, USE_PRECISE>(all_, gamma, underflow_threshold, 0.0f);
break;
case USE_SIMD:
-ldamath::expdigammify<float, USE_SIMD>(all, gamma, underflow_threshold, 0.0f);
+ldamath::expdigammify<float, USE_SIMD>(all_, gamma, underflow_threshold, 0.0f);
break;
default:
std::cerr << "lda::expdigammify: Trampled or invalid math mode, aborting" << std::endl;
abort();
}
}

-void lda::expdigammify_2(vw &all, float *gamma, float *norm)
+void lda::expdigammify_2(vw &all_, float *gamma, float *norm)
{
switch (mmode)
{
case USE_FAST_APPROX:
-ldamath::expdigammify_2<float, USE_FAST_APPROX>(all, gamma, norm, underflow_threshold);
+ldamath::expdigammify_2<float, USE_FAST_APPROX>(all_, gamma, norm, underflow_threshold);
break;
case USE_PRECISE:
-ldamath::expdigammify_2<float, USE_PRECISE>(all, gamma, norm, underflow_threshold);
+ldamath::expdigammify_2<float, USE_PRECISE>(all_, gamma, norm, underflow_threshold);
break;
case USE_SIMD:
-ldamath::expdigammify_2<float, USE_SIMD>(all, gamma, norm, underflow_threshold);
+ldamath::expdigammify_2<float, USE_SIMD>(all_, gamma, norm, underflow_threshold);
break;
default:
std::cerr << "lda::expdigammify_2: Trampled or invalid math mode, aborting" << std::endl;
@@ -1175,9 +1175,8 @@ void compute_coherence_metrics(lda &l, T &weights)
}
else
{
-std::vector<word_doc_frequency> vec = {{f2, 0}};
-coWordsDFSet.insert(std::make_pair(f1, vec));
-// printf(" insert %d %d\n", f1, f2);
+std::vector<word_doc_frequency> tmp_vec = {{f2, 0}};
+coWordsDFSet.insert(std::make_pair(f1, tmp_vec));
}
}
}
2 changes: 1 addition & 1 deletion vowpalwabbit/mf.cc
@@ -168,7 +168,7 @@ void learn(mf& data, single_learner& base, example& ec)
{
features& fs = ec.feature_space[right_ns];
// multiply features in right namespace by l^k * x_l
-for (size_t i = 0; i < fs.size(); ++i) fs.values[i] *= data.sub_predictions[2 * k - 1];
+for (size_t j = 0; j < fs.size(); ++j) fs.values[j] *= data.sub_predictions[2 * k - 1];

// update r^k using base learner
base.update(ec, k + data.rank);
2 changes: 1 addition & 1 deletion vowpalwabbit/parse_args.cc
@@ -967,7 +967,7 @@ void parse_feature_tweaks(options_i& options, vw& all, std::vector<std::string>&
else
{
// wildcard found: redefine all except default and break
-for (size_t i = 0; i < 256; i++) all.redefine[i] = new_namespace;
+for (size_t j = 0; j < 256; j++) all.redefine[j] = new_namespace;
break; // break processing S
}
}
2 changes: 1 addition & 1 deletion vowpalwabbit/parse_example.cc
@@ -285,7 +285,7 @@ class TC_parser
}

VW::string_view spelling_strview(_spelling.begin(), _spelling.size());
-uint64_t word_hash = hashstring(spelling_strview.begin(), spelling_strview.length(), (uint64_t)_channel_hash);
+word_hash = hashstring(spelling_strview.begin(), spelling_strview.length(), (uint64_t)_channel_hash);
spell_fs.push_back(_v, word_hash);
if (audit)
{
4 changes: 2 additions & 2 deletions vowpalwabbit/parse_slates_example_json.h
@@ -281,9 +281,9 @@ void parse_slates_example_dsjson(vw& all, v_array<example*>& examples, char* lin
{
assert(probs.Size() == destination.size());
const auto& probs_array = probs.GetArray();
-for (rapidjson::SizeType i = 0; i < probs_array.Size(); i++)
+for (rapidjson::SizeType j = 0; j < probs_array.Size(); j++)
{
-destination[i].score = probs_array[i].GetFloat();
+destination[j].score = probs_array[j].GetFloat();
}
}
else