Fix style with clang-format (before releasing 8.7.0)

JohnLangford committed Jun 7, 2019
1 parent 2d8eb5e · commit 22ad6283267e4173ab28783d47f915e245d3a03a
Showing with 15,459 additions and 16,069 deletions.
  1. +167 −208 vowpalwabbit/OjaNewton.cc
  2. +1 −1 vowpalwabbit/OjaNewton.h
  3. +65 −53 vowpalwabbit/accumulate.cc
  4. +4 −4 vowpalwabbit/accumulate.h
  5. +10 −14 vowpalwabbit/action_score.cc
  6. +40 −36 vowpalwabbit/action_score.h
  7. +43 −51 vowpalwabbit/active.cc
  8. +4 −5 vowpalwabbit/active.h
  9. +74 −83 vowpalwabbit/active_cover.cc
  10. +2 −1 vowpalwabbit/active_cover.h
  11. +34 −51 vowpalwabbit/active_interactor.cc
  12. +121 −121 vowpalwabbit/allreduce.h
  13. +77 −84 vowpalwabbit/allreduce_sockets.cc
  14. +16 −24 vowpalwabbit/allreduce_threads.cc
  15. +84 −108 vowpalwabbit/array_parameters.h
  16. +60 −58 vowpalwabbit/array_parameters_dense.h
  17. +114 −104 vowpalwabbit/audit_regressor.cc
  18. +2 −1 vowpalwabbit/audit_regressor.h
  19. +12 −13 vowpalwabbit/autolink.cc
  20. +1 −1 vowpalwabbit/autolink.h
  21. +54 −72 vowpalwabbit/baseline.cc
  22. +6 −7 vowpalwabbit/baseline.h
  23. +96 −99 vowpalwabbit/beam.h
  24. +43 −38 vowpalwabbit/best_constant.cc
  25. +9 −14 vowpalwabbit/best_constant.h
  26. +387 −409 vowpalwabbit/bfgs.cc
  27. +1 −1 vowpalwabbit/bfgs.h
  28. +12 −11 vowpalwabbit/binary.cc
  29. +1 −1 vowpalwabbit/binary.h
  30. +97 −127 vowpalwabbit/boosting.cc
  31. +1 −1 vowpalwabbit/boosting.h
  32. +91 −93 vowpalwabbit/bs.cc
  33. +4 −5 vowpalwabbit/bs.h
  34. +50 −60 vowpalwabbit/cache.cc
  35. +10 −9 vowpalwabbit/cache.h
  36. +91 −110 vowpalwabbit/cb.cc
  37. +19 −21 vowpalwabbit/cb.h
  38. +184 −185 vowpalwabbit/cb_adf.cc
  39. +5 −6 vowpalwabbit/cb_adf.h
  40. +68 −76 vowpalwabbit/cb_algs.cc
  41. +13 −18 vowpalwabbit/cb_algs.h
  42. +118 −101 vowpalwabbit/cb_explore.cc
  43. +4 −5 vowpalwabbit/cb_explore.h
  44. +327 −329 vowpalwabbit/cb_explore_adf.cc
  45. +4 −4 vowpalwabbit/cb_explore_adf.h
  46. +131 −144 vowpalwabbit/cbify.cc
  47. +2 −2 vowpalwabbit/cbify.h
  48. +34 −37 vowpalwabbit/classweight.cc
  49. +2 −1 vowpalwabbit/classweight.h
  50. +35 −42 vowpalwabbit/comp_io.cc
  51. +8 −9 vowpalwabbit/comp_io.h
  52. +38 −38 vowpalwabbit/confidence.cc
  53. +2 −1 vowpalwabbit/confidence.h
  54. +8 −11 vowpalwabbit/correctedMath.h
  55. +110 −133 vowpalwabbit/cost_sensitive.cc
  56. +14 −17 vowpalwabbit/cost_sensitive.h
  57. +187 −150 vowpalwabbit/cs_active.cc
  58. +1 −1 vowpalwabbit/cs_active.h
  59. +250 −278 vowpalwabbit/csoaa.cc
  60. +8 −9 vowpalwabbit/csoaa.h
  61. +91 −102 vowpalwabbit/ect.cc
  62. +1 −1 vowpalwabbit/ect.h
  63. +8 −10 vowpalwabbit/error_reporting.h
  64. +65 −82 vowpalwabbit/example.cc
  65. +60 −57 vowpalwabbit/example.h
  66. +4 −5 vowpalwabbit/example_predict.cc
  67. +16 −19 vowpalwabbit/example_predict.h
  68. +58 −64 vowpalwabbit/explore_eval.cc
  69. +2 −1 vowpalwabbit/explore_eval.h
  70. +41 −42 vowpalwabbit/expreplay.h
  71. +112 −138 vowpalwabbit/ezexample.h
  72. +130 −153 vowpalwabbit/feature_group.h
  73. +2 −4 vowpalwabbit/floatbits.h
  74. +135 −144 vowpalwabbit/ftrl.cc
  75. +1 −1 vowpalwabbit/ftrl.h
  76. +506 −508 vowpalwabbit/gd.cc
  77. +58 −58 vowpalwabbit/gd.h
  78. +141 −137 vowpalwabbit/gd_mf.cc
  79. +1 −1 vowpalwabbit/gd_mf.h
  80. +57 −48 vowpalwabbit/gd_predict.h
  81. +61 −68 vowpalwabbit/gen_cs_example.cc
  82. +124 −113 vowpalwabbit/gen_cs_example.h
  83. +73 −102 vowpalwabbit/global_data.cc
  84. +219 −217 vowpalwabbit/global_data.h
  85. +61 −58 vowpalwabbit/interact.cc
  86. +1 −1 vowpalwabbit/interact.h
  87. +150 −140 vowpalwabbit/interactions.cc
  88. +32 −25 vowpalwabbit/interactions.h
  89. +199 −172 vowpalwabbit/interactions_predict.h
  90. +38 −47 vowpalwabbit/io_buf.cc
  91. +128 −137 vowpalwabbit/io_buf.h
  92. +312 −339 vowpalwabbit/kernel_svm.cc
  93. +2 −1 vowpalwabbit/kernel_svm.h
  94. +32 −41 vowpalwabbit/label_dictionary.cc
  95. +13 −12 vowpalwabbit/label_dictionary.h
  96. +17 −15 vowpalwabbit/label_parser.h
  97. +413 −474 vowpalwabbit/lda_core.cc
  98. +3 −2 vowpalwabbit/lda_core.h
  99. +83 −99 vowpalwabbit/learner.cc
  100. +212 −209 vowpalwabbit/learner.h
  101. +164 −158 vowpalwabbit/log_multi.cc
  102. +1 −1 vowpalwabbit/log_multi.h
  103. +121 −148 vowpalwabbit/loss_functions.cc
  104. +18 −12 vowpalwabbit/loss_functions.h
  105. +59 −67 vowpalwabbit/lrq.cc
  106. +1 −1 vowpalwabbit/lrq.h
  107. +57 −55 vowpalwabbit/lrqfa.cc
  108. +1 −1 vowpalwabbit/lrqfa.h
  109. +63 −65 vowpalwabbit/main.cc
  110. +145 −147 vowpalwabbit/marginal.cc
  111. +1 −1 vowpalwabbit/marginal.h
  112. +26 −43 vowpalwabbit/memory.h
  113. +420 −471 vowpalwabbit/memory_tree.cc
  114. +2 −1 vowpalwabbit/memory_tree.h
  115. +28 −33 vowpalwabbit/mf.cc
  116. +1 −1 vowpalwabbit/mf.h
  117. +85 −84 vowpalwabbit/multiclass.cc
  118. +8 −14 vowpalwabbit/multiclass.h
  119. +68 −90 vowpalwabbit/multilabel.cc
  120. +6 −7 vowpalwabbit/multilabel.h
  121. +20 −23 vowpalwabbit/multilabel_oaa.cc
  122. +2 −1 vowpalwabbit/multilabel_oaa.h
  123. +101 −99 vowpalwabbit/mwt.cc
  124. +5 −6 vowpalwabbit/mwt.h
  125. +15 −18 vowpalwabbit/network.cc
  126. +1 −1 vowpalwabbit/network.h
  127. +133 −119 vowpalwabbit/nn.cc
  128. +1 −1 vowpalwabbit/nn.h
  129. +37 −38 vowpalwabbit/no_label.cc
  130. +5 −6 vowpalwabbit/no_label.h
  131. +4 −4 vowpalwabbit/noop.cc
  132. +1 −1 vowpalwabbit/noop.h
  133. +112 −104 vowpalwabbit/oaa.cc
  134. +1 −1 vowpalwabbit/oaa.h
  135. +27 −39 vowpalwabbit/object_pool.h
  136. +52 −68 vowpalwabbit/options.h
  137. +57 −64 vowpalwabbit/options_boost_po.cc
  138. +111 −104 vowpalwabbit/options_boost_po.h
  139. +13 −9 vowpalwabbit/options_serializer_boost_po.cc
  140. +22 −34 vowpalwabbit/options_serializer_boost_po.h
  141. +13 −21 vowpalwabbit/options_types.h
  142. +769 −688 vowpalwabbit/parse_args.cc
  143. +11 −8 vowpalwabbit/parse_args.h
  144. +24 −30 vowpalwabbit/parse_dispatch_loop.h
  145. +172 −204 vowpalwabbit/parse_example.cc
  146. +13 −17 vowpalwabbit/parse_example.h
  147. +413 −494 vowpalwabbit/parse_example_json.h
  148. +21 −24 vowpalwabbit/parse_primitives.cc
  149. +59 −67 vowpalwabbit/parse_primitives.h
  150. +240 −262 vowpalwabbit/parse_regressor.cc
  151. +11 −8 vowpalwabbit/parse_regressor.h
  152. +278 −335 vowpalwabbit/parser.cc
  153. +38 −36 vowpalwabbit/parser.h
  154. +4 −4 vowpalwabbit/primitives.cc
  155. +1 −1 vowpalwabbit/primitives.h
  156. +14 −20 vowpalwabbit/print.cc
  157. +1 −1 vowpalwabbit/print.h
  158. +16 −24 vowpalwabbit/queue.h
  159. +2 −3 vowpalwabbit/rand48.cc
  160. +1 −1 vowpalwabbit/rand48.h
  161. +124 −144 vowpalwabbit/recall_tree.cc
  162. +2 −1 vowpalwabbit/recall_tree.h
  163. +5 −5 vowpalwabbit/reductions.h
  164. +35 −35 vowpalwabbit/scorer.cc
  165. +1 −1 vowpalwabbit/scorer.h
  166. +1,523 −1,375 vowpalwabbit/search.cc
  167. +196 −173 vowpalwabbit/search.h
  168. +291 −277 vowpalwabbit/search_dep_parser.cc
  169. +6 −7 vowpalwabbit/search_dep_parser.h
  170. +151 −178 vowpalwabbit/search_entityrelationtask.cc
  171. +5 −6 vowpalwabbit/search_entityrelationtask.h
  172. +141 −152 vowpalwabbit/search_graph.cc
  173. +7 −8 vowpalwabbit/search_graph.h
  174. +17 −22 vowpalwabbit/search_hooktask.cc
  175. +29 −26 vowpalwabbit/search_hooktask.h
  176. +118 −100 vowpalwabbit/search_meta.cc
  177. +3 −4 vowpalwabbit/search_meta.h
  178. +20 −22 vowpalwabbit/search_multiclasstask.cc
  179. +5 −6 vowpalwabbit/search_multiclasstask.h
  180. +212 −209 vowpalwabbit/search_sequencetask.cc
  181. +25 −30 vowpalwabbit/search_sequencetask.h
  182. +26 −33 vowpalwabbit/sender.cc
  183. +1 −1 vowpalwabbit/sender.h
  184. +28 −29 vowpalwabbit/shared_feature_merger.cc
  185. +6 −7 vowpalwabbit/shared_feature_merger.h
  186. +78 −76 vowpalwabbit/simple_label.cc
  187. +5 −6 vowpalwabbit/simple_label.h
  188. +100 −124 vowpalwabbit/spanning_tree.cc
  189. +14 −18 vowpalwabbit/spanning_tree.h
  190. +7 −8 vowpalwabbit/stable_unique.h
  191. +222 −209 vowpalwabbit/stagewise_poly.cc
  192. +2 −1 vowpalwabbit/stagewise_poly.h
  193. +54 −68 vowpalwabbit/svrg.cc
  194. +1 −1 vowpalwabbit/svrg.h
  195. +36 −34 vowpalwabbit/topk.cc
  196. +1 −1 vowpalwabbit/topk.h
  197. +7 −9 vowpalwabbit/unique_sort.cc
  198. +3 −3 vowpalwabbit/unique_sort.h
  199. +80 −99 vowpalwabbit/v_array.h
  200. +65 −78 vowpalwabbit/v_hashmap.h
  201. +116 −102 vowpalwabbit/vw.h
  202. +10 −12 vowpalwabbit/vw_allreduce.h
  203. +24 −27 vowpalwabbit/vw_exception.cc
  204. +88 −95 vowpalwabbit/vw_exception.h
  205. +17 −16 vowpalwabbit/vw_validate.cc
  206. +6 −7 vowpalwabbit/vw_validate.h
  207. +16 −9 vowpalwabbit/vw_versions.h
  208. +118 −82 vowpalwabbit/vwdll.h
  209. +197 −201 vowpalwabbit/warm_cb.cc
  210. +1 −1 vowpalwabbit/warm_cb.h
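
The exact invocation is not recorded on this page, but a whole-tree reformat like this one is typically produced by running clang-format in place over the sources, along the lines of (assuming a .clang-format file at the repository root and a shell that expands the globs):

    clang-format -i vowpalwabbit/*.cc vowpalwabbit/*.h

The resulting layout depends on the clang-format version and the style options in effect, so rerunning a different version can yield a slightly different diff.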

Large diffs are not rendered by default.

@@ -3,4 +3,4 @@ Copyright (c) by respective owners including Yahoo!, Microsoft, and
individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
LEARNER::base_learner* OjaNewton_setup(VW::config::options_i& options, vw& all);
LEARNER::base_learner *OjaNewton_setup(VW::config::options_i &options, vw &all);
@@ -8,84 +8,97 @@ This implements the allreduce function of MPI. Code primarily by
Alekh Agarwal and John Langford, with help Olivier Chapelle.
*/

#include <iostream>
#include <sys/timeb.h>
#include <cmath>
#include <stdint.h>
#include "global_data.h"
#include "vw_allreduce.h"
#include <cmath>
#include <iostream>
#include <stdint.h>
#include <sys/timeb.h>

using namespace std;

void add_float(float& c1, const float& c2) { c1 += c2; }
void add_float(float &c1, const float &c2) { c1 += c2; }

void accumulate(vw& all, parameters& weights, size_t offset)
{
uint64_t length = UINT64_ONE << all.num_bits; // This is size of gradient
float* local_grad = new float[length];
void accumulate(vw &all, parameters &weights, size_t offset) {
uint64_t length = UINT64_ONE << all.num_bits; // This is size of gradient
float *local_grad = new float[length];

if (weights.sparse)
for (uint64_t i = 0; i < length; i++)
local_grad[i] = (&(weights.sparse_weights[i << weights.sparse_weights.stride_shift()]))[offset];
local_grad[i] =
(&(weights.sparse_weights[i << weights.sparse_weights
.stride_shift()]))[offset];
else
for (uint64_t i = 0; i < length; i++)
local_grad[i] = (&(weights.dense_weights[i << weights.dense_weights.stride_shift()]))[offset];
local_grad[i] =
(&(weights.dense_weights[i << weights.dense_weights
.stride_shift()]))[offset];

all_reduce<float, add_float>(all, local_grad, length); // TODO: modify to not use first()
all_reduce<float, add_float>(all, local_grad,
length); // TODO: modify to not use first()

if (weights.sparse)
for (uint64_t i = 0; i < length; i++)
(&(weights.sparse_weights[i << weights.sparse_weights.stride_shift()]))[offset] = local_grad[i];
(&(weights.sparse_weights[i << weights.sparse_weights
.stride_shift()]))[offset] =
local_grad[i];
else
for (uint64_t i = 0; i < length; i++)
(&(weights.dense_weights[i << weights.dense_weights.stride_shift()]))[offset] = local_grad[i];
(&(weights.dense_weights[i << weights.dense_weights
.stride_shift()]))[offset] =
local_grad[i];

delete[] local_grad;
}

float accumulate_scalar(vw& all, float local_sum)
{
float accumulate_scalar(vw &all, float local_sum) {
float temp = local_sum;
all_reduce<float, add_float>(all, &temp, 1);
return temp;
}

void accumulate_avg(vw& all, parameters& weights, size_t offset)
{
uint32_t length = 1 << all.num_bits; // This is size of gradient
void accumulate_avg(vw &all, parameters &weights, size_t offset) {
uint32_t length = 1 << all.num_bits; // This is size of gradient
float numnodes = (float)all.all_reduce->total;
float* local_grad = new float[length];
float *local_grad = new float[length];

if (weights.sparse)
for (uint64_t i = 0; i < length; i++)
local_grad[i] = (&(weights.sparse_weights[i << weights.sparse_weights.stride_shift()]))[offset];
local_grad[i] =
(&(weights.sparse_weights[i << weights.sparse_weights
.stride_shift()]))[offset];
else
for (uint64_t i = 0; i < length; i++)
local_grad[i] = (&(weights.dense_weights[i << weights.dense_weights.stride_shift()]))[offset];
local_grad[i] =
(&(weights.dense_weights[i << weights.dense_weights
.stride_shift()]))[offset];

all_reduce<float, add_float>(all, local_grad, length); // TODO: modify to not use first()
all_reduce<float, add_float>(all, local_grad,
length); // TODO: modify to not use first()

if (weights.sparse)
for (uint64_t i = 0; i < length; i++)
(&(weights.sparse_weights[i << weights.sparse_weights.stride_shift()]))[offset] = local_grad[i] / numnodes;
(&(weights.sparse_weights[i << weights.sparse_weights
.stride_shift()]))[offset] =
local_grad[i] / numnodes;
else
for (uint64_t i = 0; i < length; i++)
(&(weights.dense_weights[i << weights.dense_weights.stride_shift()]))[offset] = local_grad[i] / numnodes;
(&(weights.dense_weights[i << weights.dense_weights
.stride_shift()]))[offset] =
local_grad[i] / numnodes;

delete[] local_grad;
}

float max_elem(float* arr, int length)
{
float max_elem(float *arr, int length) {
float max = arr[0];
for (int i = 1; i < length; i++)
if (arr[i] > max)
max = arr[i];
return max;
}

float min_elem(float* arr, int length)
{
float min_elem(float *arr, int length) {
float min = arr[0];
for (int i = 1; i < length; i++)
if (arr[i] < min && arr[i] > 0.001)
@@ -94,44 +107,41 @@ float min_elem(float* arr, int length)
}

template <class T>
void do_weighting(vw& all, uint64_t length, float* local_weights, T& weights)
{
for (uint64_t i = 0; i < length; i++)
{
float* weight = &weights[i << weights.stride_shift()];
if (local_weights[i] > 0)
{
void do_weighting(vw &all, uint64_t length, float *local_weights, T &weights) {
for (uint64_t i = 0; i < length; i++) {
float *weight = &weights[i << weights.stride_shift()];
if (local_weights[i] > 0) {
float ratio = weight[1] / local_weights[i];
local_weights[i] = weight[0] * ratio;
weight[0] *= ratio;
weight[1] *= ratio; // A crude max
weight[1] *= ratio; // A crude max
if (all.normalized_updates)
weight[all.normalized_idx] *= ratio; // A crude max
}
else
{
weight[all.normalized_idx] *= ratio; // A crude max
} else {
local_weights[i] = 0;
*weight = 0;
}
}
}

void accumulate_weighted_avg(vw& all, parameters& weights)
{
if (!all.adaptive)
{
all.trace_message << "Weighted averaging is implemented only for adaptive gradient, use accumulate_avg instead\n";
void accumulate_weighted_avg(vw &all, parameters &weights) {
if (!all.adaptive) {
all.trace_message << "Weighted averaging is implemented only for adaptive "
"gradient, use accumulate_avg instead\n";
return;
}
uint32_t length = 1 << all.num_bits; // This is the number of parameters
float* local_weights = new float[length];
uint32_t length = 1 << all.num_bits; // This is the number of parameters
float *local_weights = new float[length];

if (weights.sparse)
for (uint64_t i = 0; i < length; i++)
local_weights[i] = (&(weights.sparse_weights[i << weights.sparse_weights.stride_shift()]))[1];
local_weights[i] = (&(
weights
.sparse_weights[i << weights.sparse_weights.stride_shift()]))[1];
else
for (uint64_t i = 0; i < length; i++)
local_weights[i] = (&(weights.dense_weights[i << weights.dense_weights.stride_shift()]))[1];
local_weights[i] = (&(
weights.dense_weights[i << weights.dense_weights.stride_shift()]))[1];

// First compute weights for averaging
all_reduce<float, add_float>(all, local_weights, length);
@@ -142,9 +152,11 @@ void accumulate_weighted_avg(vw& all, parameters& weights)
do_weighting(all, length, local_weights, weights.dense_weights);

if (weights.sparse)
cout << "sparse parameters not supported with parallel computation!" << endl;
cout << "sparse parameters not supported with parallel computation!"
<< endl;
else
all_reduce<float, add_float>(
all, weights.dense_weights.first(), ((size_t)length) * (1ull << weights.stride_shift()));
all_reduce<float, add_float>(all, weights.dense_weights.first(),
((size_t)length) *
(1ull << weights.stride_shift()));
delete[] local_weights;
}
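
The accumulate/accumulate_avg pair above follows a simple pattern: each node copies one strided slice of its weights into a local float buffer (local_grad), all_reduce<float, add_float> sums those buffers element-wise across nodes, and accumulate_avg then divides by all.all_reduce->total so every node ends up with the mean rather than the sum. A minimal, self-contained sketch of that pattern, simulating the cross-node reduction with an in-process sum over per-node buffers (illustrative only, not VW's allreduce API):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Stand-in for all_reduce<float, add_float>(all, buf, length): element-wise
    // sum of every node's buffer, so each node ends up holding the global sum.
    static std::vector<float> allreduce_sum(const std::vector<std::vector<float>>& per_node)
    {
      std::vector<float> out(per_node[0].size(), 0.f);
      for (const auto& node : per_node)
        for (std::size_t i = 0; i < node.size(); i++) out[i] += node[i];
      return out;
    }

    int main()
    {
      // Two nodes, each holding a local gradient of length 3 (cf. local_grad above).
      std::vector<std::vector<float>> grads = {{1.f, 2.f, 3.f}, {3.f, 2.f, 1.f}};

      std::vector<float> summed = allreduce_sum(grads);  // accumulate(): keep the sum
      float numnodes = (float)grads.size();              // plays the role of all.all_reduce->total
      for (float& v : summed) v /= numnodes;             // accumulate_avg(): turn the sum into a mean

      for (float v : summed) std::printf("%.1f\n", v);   // prints 2.0 for every coordinate
      return 0;
    }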
@@ -7,7 +7,7 @@ license as described in the file LICENSE.
#pragma once
#include "global_data.h"

void accumulate(vw& all, parameters& weights, size_t o);
float accumulate_scalar(vw& all, float local_sum);
void accumulate_weighted_avg(vw& all, parameters& weights);
void accumulate_avg(vw& all, parameters& weights, size_t o);
void accumulate(vw &all, parameters &weights, size_t o);
float accumulate_scalar(vw &all, float local_sum);
void accumulate_weighted_avg(vw &all, parameters &weights);
void accumulate_avg(vw &all, parameters &weights, size_t o);
@@ -1,33 +1,29 @@
#include "v_array.h"
#include "action_score.h"
#include "io_buf.h"
#include "v_array.h"
using namespace std;
namespace ACTION_SCORE
{
void print_action_score(int f, v_array<action_score>& a_s, v_array<char>&)
{
if (f >= 0)
{
namespace ACTION_SCORE {
void print_action_score(int f, v_array<action_score> &a_s, v_array<char> &) {
if (f >= 0) {
std::stringstream ss;

for (size_t i = 0; i < a_s.size(); i++)
{
for (size_t i = 0; i < a_s.size(); i++) {
if (i > 0)
ss << ',';
ss << a_s[i].action << ':' << a_s[i].score;
}
ss << '\n';
ssize_t len = ss.str().size();
ssize_t t = io_buf::write_file_or_socket(f, ss.str().c_str(), (unsigned int)len);
ssize_t t =
io_buf::write_file_or_socket(f, ss.str().c_str(), (unsigned int)len);
if (t != len)
cerr << "write error: " << strerror(errno) << endl;
}
}

void delete_action_scores(void* v)
{
v_array<action_score>* cs = (v_array<action_score>*)v;
void delete_action_scores(void *v) {
v_array<action_score> *cs = (v_array<action_score> *)v;
cs->delete_v();
}

} // namespace ACTION_SCORE
} // namespace ACTION_SCORE
@@ -1,65 +1,67 @@
#pragma once
namespace ACTION_SCORE
{
struct action_score
{
namespace ACTION_SCORE {
struct action_score {
uint32_t action;
float score;
};

typedef v_array<action_score> action_scores;

class score_iterator : public virtual std::iterator<std::random_access_iterator_tag, // iterator_cateogry
float, // value_type
long, // difference_type
float*, // pointer
float // reference
>
{
action_score* _p;
class score_iterator : public virtual std::iterator<
std::random_access_iterator_tag, // iterator_cateogry
float, // value_type
long, // difference_type
float *, // pointer
float // reference
> {
action_score *_p;

public:
score_iterator(action_score* p) : _p(p) {}
public:
score_iterator(action_score *p) : _p(p) {}

score_iterator& operator++()
{
score_iterator &operator++() {
++_p;
return *this;
}

score_iterator operator+(size_t n) { return score_iterator(_p + n); }

bool operator==(const score_iterator& other) const { return _p == other._p; }
bool operator==(const score_iterator &other) const { return _p == other._p; }

bool operator!=(const score_iterator& other) const { return _p != other._p; }
bool operator!=(const score_iterator &other) const { return _p != other._p; }

bool operator<(const score_iterator& other) const { return _p < other._p; }
bool operator<(const score_iterator &other) const { return _p < other._p; }

size_t operator-(const score_iterator& other) const { return _p - other._p; }
size_t operator-(const score_iterator &other) const { return _p - other._p; }

float& operator*() { return _p->score; }
float &operator*() { return _p->score; }
};

inline score_iterator begin_scores(action_scores& a_s) { return score_iterator(a_s.begin()); }
inline score_iterator begin_scores(action_scores &a_s) {
return score_iterator(a_s.begin());
}

inline score_iterator end_scores(action_scores& a_s) { return score_iterator(a_s.end()); }
inline score_iterator end_scores(action_scores &a_s) {
return score_iterator(a_s.end());
}

inline int cmp(size_t a, size_t b)
{
inline int cmp(size_t a, size_t b) {
if (a == b)
return 0;
if (a > b)
return 1;
return -1;
}

inline int score_comp(const void* p1, const void* p2)
{
action_score* s1 = (action_score*)p1;
action_score* s2 = (action_score*)p2;
// Most sorting algos do not guarantee the output order of elements that compare equal.
// Tie-breaking on the index ensures that the result is deterministic across platforms.
// However, this forces a strict ordering, rather than a weak ordering, which carries a performance cost.
inline int score_comp(const void *p1, const void *p2) {
action_score *s1 = (action_score *)p1;
action_score *s2 = (action_score *)p2;
// Most sorting algos do not guarantee the output order of elements that
// compare equal.
// Tie-breaking on the index ensures that the result is deterministic across
// platforms.
// However, this forces a strict ordering, rather than a weak ordering, which
// carries a performance cost.
if (s2->score == s1->score)
return cmp(s1->action, s2->action);
else if (s2->score >= s1->score)
@@ -68,9 +70,11 @@ inline int score_comp(const void* p1, const void* p2)
return 1;
}

inline int reverse_order(const void* p1, const void* p2) { return score_comp(p2, p1); }
inline int reverse_order(const void *p1, const void *p2) {
return score_comp(p2, p1);
}

void print_action_score(int f, v_array<action_score>& a_s, v_array<char>&);
void print_action_score(int f, v_array<action_score> &a_s, v_array<char> &);

void delete_action_scores(void* v);
} // namespace ACTION_SCORE
void delete_action_scores(void *v);
} // namespace ACTION_SCORE
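
The comment added to score_comp above documents why the comparator tie-breaks on the action index: most sorting algorithms make no guarantee about the relative order of elements that compare equal, so without the tie-break the sorted action_scores could come out in a platform-dependent order. A small standalone illustration of the same idea, written against std::sort with hypothetical names rather than code from this commit:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct action_score { uint32_t action; float score; };

    // Order primarily by score and break score ties on the action index, so the
    // result is identical regardless of the sort implementation. As the comment
    // above notes, this forces a strict ordering and costs a little extra work.
    static bool deterministic_less(const action_score& a, const action_score& b)
    {
      if (a.score != b.score) return a.score < b.score;
      return a.action < b.action;
    }

    int main()
    {
      std::vector<action_score> scores = {{2, 0.5f}, {0, 0.5f}, {1, 0.1f}};
      std::sort(scores.begin(), scores.end(), deterministic_less);
      for (const auto& s : scores)
        std::printf("%u:%.1f\n", (unsigned)s.action, s.score);
      // Always prints 1:0.1, 0:0.5, 2:0.5 -- the 0.5 tie is resolved by action.
      return 0;
    }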
