initial commit

commit 7a812fa9fcfa02dc45c9c9f2b3cda52d068c2672 (0 parents)
Andy Gross (argv0) authored
Showing with 24,252 additions and 0 deletions.
  1. +6 −0 .gitignore
  2. +11 −0 Makefile
  3. +307 −0 c_src/basho_metrics_nifs.cpp
  4. +45 −0 c_src/basho_metrics_nifs.h
  5. +23 −0 c_src/boost.atomic/LICENSE_1_0.txt
  6. +204 −0 c_src/boost.atomic/boost/atomic.hpp
  7. +186 −0 c_src/boost.atomic/boost/atomic/detail/base.hpp
  8. +405 −0 c_src/boost.atomic/boost/atomic/detail/builder.hpp
  9. +76 −0 c_src/boost.atomic/boost/atomic/detail/fallback.hpp
  10. +354 −0 c_src/boost.atomic/boost/atomic/detail/gcc-alpha.hpp
  11. +299 −0 c_src/boost.atomic/boost/atomic/detail/gcc-armv6+.hpp
  12. +351 −0 c_src/boost.atomic/boost/atomic/detail/gcc-ppc.hpp
  13. +454 −0 c_src/boost.atomic/boost/atomic/detail/gcc-x86.hpp
  14. +192 −0 c_src/boost.atomic/boost/atomic/detail/generic-cas.hpp
  15. +292 −0 c_src/boost.atomic/boost/atomic/detail/integral-casts.hpp
  16. +131 −0 c_src/boost.atomic/boost/atomic/detail/interlocked.hpp
  17. +169 −0 c_src/boost.atomic/boost/atomic/detail/linux-arm.hpp
  18. +37 −0 c_src/boost.atomic/boost/atomic/detail/valid_integral_types.hpp
  19. +42 −0 c_src/boost.atomic/boost/atomic/platform.hpp
  20. +131 −0 c_src/boost/boost/assert.hpp
  21. +70 −0 c_src/boost/boost/config.hpp
  22. +27 −0 c_src/boost/boost/config/abi/borland_prefix.hpp
  23. +12 −0 c_src/boost/boost/config/abi/borland_suffix.hpp
  24. +22 −0 c_src/boost/boost/config/abi/msvc_prefix.hpp
  25. +8 −0 c_src/boost/boost/config/abi/msvc_suffix.hpp
  26. +25 −0 c_src/boost/boost/config/abi_prefix.hpp
  27. +27 −0 c_src/boost/boost/config/abi_suffix.hpp
  28. +417 −0 c_src/boost/boost/config/auto_link.hpp
  29. +284 −0 c_src/boost/boost/config/compiler/borland.hpp
  30. +85 −0 c_src/boost/boost/config/compiler/clang.hpp
  31. +178 −0 c_src/boost/boost/config/compiler/codegear.hpp
  32. +59 −0 c_src/boost/boost/config/compiler/comeau.hpp
  33. +100 −0 c_src/boost/boost/config/compiler/common_edg.hpp
  34. +19 −0 c_src/boost/boost/config/compiler/compaq_cxx.hpp
  35. +100 −0 c_src/boost/boost/config/compiler/digitalmars.hpp
  36. +248 −0 c_src/boost/boost/config/compiler/gcc.hpp
  37. +58 −0 c_src/boost/boost/config/compiler/gcc_xml.hpp
  38. +28 −0 c_src/boost/boost/config/compiler/greenhills.hpp
  39. +137 −0 c_src/boost/boost/config/compiler/hp_acc.hpp
  40. +243 −0 c_src/boost/boost/config/compiler/intel.hpp
  41. +33 −0 c_src/boost/boost/config/compiler/kai.hpp
  42. +141 −0 c_src/boost/boost/config/compiler/metrowerks.hpp
  43. +83 −0 c_src/boost/boost/config/compiler/mpw.hpp
  44. +28 −0 c_src/boost/boost/config/compiler/nvcc.hpp
  45. +79 −0 c_src/boost/boost/config/compiler/pathscale.hpp
  46. +79 −0 c_src/boost/boost/config/compiler/pgi.hpp
  47. +29 −0 c_src/boost/boost/config/compiler/sgi_mipspro.hpp
  48. +146 −0 c_src/boost/boost/config/compiler/sunpro_cc.hpp
  49. +119 −0 c_src/boost/boost/config/compiler/vacpp.hpp
  50. +282 −0 c_src/boost/boost/config/compiler/visualc.hpp
  51. +28 −0 c_src/boost/boost/config/no_tr1/cmath.hpp
  52. +28 −0 c_src/boost/boost/config/no_tr1/complex.hpp
  53. +28 −0 c_src/boost/boost/config/no_tr1/functional.hpp
  54. +28 −0 c_src/boost/boost/config/no_tr1/memory.hpp
  55. +28 −0 c_src/boost/boost/config/no_tr1/utility.hpp
  56. +33 −0 c_src/boost/boost/config/platform/aix.hpp
  57. +15 −0 c_src/boost/boost/config/platform/amigaos.hpp
  58. +26 −0 c_src/boost/boost/config/platform/beos.hpp
  59. +86 −0 c_src/boost/boost/config/platform/bsd.hpp
  60. +58 −0 c_src/boost/boost/config/platform/cygwin.hpp
  61. +87 −0 c_src/boost/boost/config/platform/hpux.hpp
  62. +31 −0 c_src/boost/boost/config/platform/irix.hpp
  63. +103 −0 c_src/boost/boost/config/platform/linux.hpp
  64. +87 −0 c_src/boost/boost/config/platform/macos.hpp
  65. +31 −0 c_src/boost/boost/config/platform/qnxnto.hpp
  66. +28 −0 c_src/boost/boost/config/platform/solaris.hpp
  67. +97 −0 c_src/boost/boost/config/platform/symbian.hpp
  68. +25 −0 c_src/boost/boost/config/platform/vms.hpp
  69. +31 −0 c_src/boost/boost/config/platform/vxworks.hpp
  70. +69 −0 c_src/boost/boost/config/platform/win32.hpp
  71. +95 −0 c_src/boost/boost/config/posix_features.hpp
  72. +92 −0 c_src/boost/boost/config/requires_threads.hpp
  73. +108 −0 c_src/boost/boost/config/select_compiler_config.hpp
  74. +101 −0 c_src/boost/boost/config/select_platform_config.hpp
  75. +85 −0 c_src/boost/boost/config/select_stdlib_config.hpp
  76. +145 −0 c_src/boost/boost/config/stdlib/dinkumware.hpp
  77. +69 −0 c_src/boost/boost/config/stdlib/libcomo.hpp
  78. +36 −0 c_src/boost/boost/config/stdlib/libcpp.hpp
  79. +153 −0 c_src/boost/boost/config/stdlib/libstdcpp3.hpp
  80. +53 −0 c_src/boost/boost/config/stdlib/modena.hpp
  81. +81 −0 c_src/boost/boost/config/stdlib/msl.hpp
  82. +183 −0 c_src/boost/boost/config/stdlib/roguewave.hpp
  83. +145 −0 c_src/boost/boost/config/stdlib/sgi.hpp
  84. +244 −0 c_src/boost/boost/config/stdlib/stlport.hpp
  85. +51 −0 c_src/boost/boost/config/stdlib/vacpp.hpp
  86. +671 −0 c_src/boost/boost/config/suffix.hpp
  87. +124 −0 c_src/boost/boost/config/user.hpp
  88. +47 −0 c_src/boost/boost/config/warning_disable.hpp
  89. +508 −0 c_src/boost/boost/cstdint.hpp
  90. +67 −0 c_src/boost/boost/current_function.hpp
  91. +78 −0 c_src/boost/boost/detail/endian.hpp
  92. +449 −0 c_src/boost/boost/detail/limits.hpp
  93. +267 −0 c_src/boost/boost/detail/workaround.hpp
  94. +257 −0 c_src/boost/boost/integer.hpp
  95. +126 −0 c_src/boost/boost/integer/integer_mask.hpp
  96. +127 −0 c_src/boost/boost/integer/static_log2.hpp
  97. +164 −0 c_src/boost/boost/integer_fwd.hpp
  98. +261 −0 c_src/boost/boost/integer_traits.hpp
  99. +146 −0 c_src/boost/boost/limits.hpp
  100. +48 −0 c_src/boost/boost/mpl/aux_/adl_barrier.hpp
  101. +39 −0 c_src/boost/boost/mpl/aux_/arity.hpp
  102. +40 −0 c_src/boost/boost/mpl/aux_/config/adl.hpp
  103. +30 −0 c_src/boost/boost/mpl/aux_/config/arrays.hpp
  104. +30 −0 c_src/boost/boost/mpl/aux_/config/ctps.hpp
  105. +46 −0 c_src/boost/boost/mpl/aux_/config/dtp.hpp
  106. +47 −0 c_src/boost/boost/mpl/aux_/config/eti.hpp
  107. +23 −0 c_src/boost/boost/mpl/aux_/config/gcc.hpp
  108. +38 −0 c_src/boost/boost/mpl/aux_/config/integral.hpp
  109. +21 −0 c_src/boost/boost/mpl/aux_/config/intel.hpp
  110. +32 −0 c_src/boost/boost/mpl/aux_/config/lambda.hpp
  111. +21 −0 c_src/boost/boost/mpl/aux_/config/msvc.hpp
  112. +41 −0 c_src/boost/boost/mpl/aux_/config/nttp.hpp
  113. +29 −0 c_src/boost/boost/mpl/aux_/config/overload_resolution.hpp
  114. +39 −0 c_src/boost/boost/mpl/aux_/config/preprocessor.hpp
  115. +25 −0 c_src/boost/boost/mpl/aux_/config/static_constant.hpp
  116. +41 −0 c_src/boost/boost/mpl/aux_/config/ttp.hpp
  117. +19 −0 c_src/boost/boost/mpl/aux_/config/workaround.hpp
  118. +93 −0 c_src/boost/boost/mpl/aux_/integral_wrapper.hpp
  119. +25 −0 c_src/boost/boost/mpl/aux_/lambda_arity_param.hpp
  120. +169 −0 c_src/boost/boost/mpl/aux_/lambda_support.hpp
  121. +95 −0 c_src/boost/boost/mpl/aux_/na.hpp
  122. +31 −0 c_src/boost/boost/mpl/aux_/na_fwd.hpp
  123. +175 −0 c_src/boost/boost/mpl/aux_/na_spec.hpp
  124. +35 −0 c_src/boost/boost/mpl/aux_/nttp_decl.hpp
  125. +105 −0 c_src/boost/boost/mpl/aux_/preprocessor/def_params_tail.hpp
  126. +62 −0 c_src/boost/boost/mpl/aux_/preprocessor/enum.hpp
  127. +28 −0 c_src/boost/boost/mpl/aux_/preprocessor/filter_params.hpp
  128. +65 −0 c_src/boost/boost/mpl/aux_/preprocessor/params.hpp
  129. +65 −0 c_src/boost/boost/mpl/aux_/preprocessor/sub.hpp
  130. +29 −0 c_src/boost/boost/mpl/aux_/preprocessor/tuple.hpp
  131. +27 −0 c_src/boost/boost/mpl/aux_/static_cast.hpp
  132. +23 −0 c_src/boost/boost/mpl/aux_/template_arity_fwd.hpp
  133. +89 −0 c_src/boost/boost/mpl/aux_/value_wknd.hpp
  134. +58 −0 c_src/boost/boost/mpl/aux_/yes_no.hpp
  135. +39 −0 c_src/boost/boost/mpl/bool.hpp
  136. +33 −0 c_src/boost/boost/mpl/bool_fwd.hpp
  137. +135 −0 c_src/boost/boost/mpl/if.hpp
  138. +22 −0 c_src/boost/boost/mpl/int.hpp
  139. +27 −0 c_src/boost/boost/mpl/int_fwd.hpp
  140. +51 −0 c_src/boost/boost/mpl/integral_c.hpp
  141. +32 −0 c_src/boost/boost/mpl/integral_c_fwd.hpp
  142. +26 −0 c_src/boost/boost/mpl/integral_c_tag.hpp
  143. +57 −0 c_src/boost/boost/mpl/lambda_fwd.hpp
  144. +21 −0 c_src/boost/boost/mpl/limits/arity.hpp
  145. +26 −0 c_src/boost/boost/mpl/void_fwd.hpp
  146. +27 −0 c_src/boost/boost/non_type.hpp
  147. +112 −0 c_src/boost/boost/pending/integer_log2.hpp
  148. +51 −0 c_src/boost/boost/preprocessor/arithmetic/add.hpp
  149. +288 −0 c_src/boost/boost/preprocessor/arithmetic/dec.hpp
  150. +288 −0 c_src/boost/boost/preprocessor/arithmetic/inc.hpp
  151. +50 −0 c_src/boost/boost/preprocessor/arithmetic/sub.hpp
  152. +28 −0 c_src/boost/boost/preprocessor/array/data.hpp
  153. +29 −0 c_src/boost/boost/preprocessor/array/elem.hpp
  154. +28 −0 c_src/boost/boost/preprocessor/array/size.hpp
  155. +35 −0 c_src/boost/boost/preprocessor/cat.hpp
  156. +17 −0 c_src/boost/boost/preprocessor/comma_if.hpp
  157. +70 −0 c_src/boost/boost/preprocessor/config/config.hpp
  158. +536 −0 c_src/boost/boost/preprocessor/control/detail/dmc/while.hpp
  159. +534 −0 c_src/boost/boost/preprocessor/control/detail/edg/while.hpp
  160. +277 −0 c_src/boost/boost/preprocessor/control/detail/msvc/while.hpp
  161. +536 −0 c_src/boost/boost/preprocessor/control/detail/while.hpp
  162. +31 −0 c_src/boost/boost/preprocessor/control/expr_iif.hpp
  163. +30 −0 c_src/boost/boost/preprocessor/control/if.hpp
  164. +34 −0 c_src/boost/boost/preprocessor/control/iif.hpp
  165. +312 −0 c_src/boost/boost/preprocessor/control/while.hpp
  166. +33 −0 c_src/boost/boost/preprocessor/debug/error.hpp
  167. +293 −0 c_src/boost/boost/preprocessor/detail/auto_rec.hpp
  168. +48 −0 c_src/boost/boost/preprocessor/detail/check.hpp
  169. +286 −0 c_src/boost/boost/preprocessor/detail/dmc/auto_rec.hpp
  170. +30 −0 c_src/boost/boost/preprocessor/detail/is_binary.hpp
  171. +17 −0 c_src/boost/boost/preprocessor/empty.hpp
  172. +17 −0 c_src/boost/boost/preprocessor/enum_params.hpp
  173. +21 −0 c_src/boost/boost/preprocessor/facilities/empty.hpp
  174. +23 −0 c_src/boost/boost/preprocessor/facilities/identity.hpp
  175. +17 −0 c_src/boost/boost/preprocessor/identity.hpp
  176. +17 −0 c_src/boost/boost/preprocessor/inc.hpp
  177. +17 −0 c_src/boost/boost/preprocessor/iterate.hpp
  178. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/lower1.hpp
  179. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/lower2.hpp
  180. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/lower3.hpp
  181. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/lower4.hpp
  182. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/lower5.hpp
  183. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/upper1.hpp
  184. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/upper2.hpp
  185. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/upper3.hpp
  186. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/upper4.hpp
  187. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/bounds/upper5.hpp
  188. +99 −0 c_src/boost/boost/preprocessor/iteration/detail/finish.hpp
  189. +1,342 −0 c_src/boost/boost/preprocessor/iteration/detail/iter/forward1.hpp
  190. +1,338 −0 c_src/boost/boost/preprocessor/iteration/detail/iter/forward2.hpp
  191. +1,338 −0 c_src/boost/boost/preprocessor/iteration/detail/iter/forward3.hpp
Note: the rest of the diff is not displayed because too many files (334) changed.
6 .gitignore
@@ -0,0 +1,6 @@
+/.eunit/*
+/deps/*
+/priv/*
+*.o
+*.beam
+ebin
11 Makefile
@@ -0,0 +1,11 @@
+
+all: compile
+
+compile:
+ ./rebar compile
+
+test: compile
+ ./rebar eunit
+
+clean:
+ ./rebar clean
307 c_src/basho_metrics_nifs.cpp
@@ -0,0 +1,307 @@
+// -------------------------------------------------------------------
+//
+// basho_metrics_nifs: fast performance metrics for erlang
+//
+// Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
+//
+// This file is provided to you under the Apache License,
+// Version 2.0 (the "License"); you may not use this file
+// except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// -------------------------------------------------------------------
+#include "basho_metrics_nifs.h"
+#include "histogram_metric.hpp"
+#include "meter_metric.hpp"
+#include <vector>
+
+static ErlNifResourceType* histogram_RESOURCE;
+static ErlNifResourceType* counter_RESOURCE;
+static ErlNifResourceType* meter_RESOURCE;
+
+struct counter
+{
+ boost::atomic_uint64_t value;
+};
+
+struct meter_handle
+{
+ meter<> *p;
+};
+
+struct histogram_handle
+{
+ histogram<> *p;
+};
+
+struct counter_handle
+{
+ counter *p;
+};
+
+// Atoms (initialized in on_load)
+static ERL_NIF_TERM ATOM_TRUE;
+static ERL_NIF_TERM ATOM_FALSE;
+static ERL_NIF_TERM ATOM_OK;
+static ERL_NIF_TERM ATOM_ERROR;
+static ERL_NIF_TERM ATOM_NOT_FOUND;
+static ERL_NIF_TERM ATOM_MIN;
+static ERL_NIF_TERM ATOM_MAX;
+static ERL_NIF_TERM ATOM_MEAN;
+static ERL_NIF_TERM ATOM_MEDIAN;
+static ERL_NIF_TERM ATOM_COUNT;
+static ERL_NIF_TERM ATOM_P95;
+static ERL_NIF_TERM ATOM_P99;
+static ERL_NIF_TERM ATOM_P999;
+static ERL_NIF_TERM ATOM_ONE;
+static ERL_NIF_TERM ATOM_FIVE;
+static ERL_NIF_TERM ATOM_FIFTEEN;
+
+static ErlNifFunc nif_funcs[] =
+{
+ {"histogram_new", 0, histogram_new},
+ {"histogram_update", 2, histogram_update},
+ {"histogram_stats", 1, histogram_stats},
+ {"histogram_clear", 1, histogram_clear},
+ {"counter_new", 0, counter_new},
+ {"counter_increment", 1, counter_increment},
+ {"counter_value", 1, counter_value},
+ {"meter_new", 0, meter_new},
+ {"meter_update", 2, meter_update},
+ {"meter_tick", 1, meter_tick},
+ {"meter_stats", 1, meter_stats},
+};
+
+#define ATOM(Id, Value) { Id = enif_make_atom(env, Value); }
+#define STAT_TUPLE(Key, Value) enif_make_tuple2(env, Key, enif_make_long(env, Value))
+
+ERL_NIF_TERM counter_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ counter_handle* handle =
+ (counter_handle *)enif_alloc_resource(counter_RESOURCE,
+ sizeof(counter_handle));
+ memset(handle, '\0', sizeof(counter_handle));
+ handle->p = new counter;
+ ERL_NIF_TERM result = enif_make_resource(env, handle);
+ enif_release_resource(handle);
+ return enif_make_tuple2(env, ATOM_OK, result);
+}
+
+ERL_NIF_TERM counter_increment(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ counter_handle* handle;
+ if (enif_get_resource(env,argv[0], counter_RESOURCE,(void**)&handle))
+ {
+ ++handle->p->value;
+ return ATOM_OK;
+ }
+ else
+ {
+ return enif_make_badarg(env);
+ }
+}
+
+ERL_NIF_TERM counter_value(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ counter_handle* handle;
+ if (enif_get_resource(env,argv[0], counter_RESOURCE,(void**)&handle))
+ return enif_make_uint64(env, handle->p->value);
+ else
+ return enif_make_badarg(env);
+}
+
+
+ERL_NIF_TERM histogram_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ histogram_handle *handle =
+ (histogram_handle *)enif_alloc_resource(histogram_RESOURCE,
+ sizeof(histogram_handle));
+ memset(handle, '\0', sizeof(histogram_handle));
+ handle->p = new histogram<>;
+ ERL_NIF_TERM result = enif_make_resource(env, handle);
+ enif_release_resource(handle);
+ return enif_make_tuple2(env, ATOM_OK, result);
+}
+
+ERL_NIF_TERM histogram_clear(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ histogram_handle* handle;
+ if (enif_get_resource(env,argv[0],histogram_RESOURCE,(void**)&handle))
+ {
+ handle->p->clear();
+ return ATOM_OK;
+ }
+ else
+ {
+ return enif_make_badarg(env);
+ }
+}
+
+ERL_NIF_TERM histogram_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ histogram_handle* handle;
+ unsigned int sample;
+ if (enif_get_resource(env,argv[0],histogram_RESOURCE,(void**)&handle) &&
+ enif_get_uint(env, argv[1], &sample))
+ {
+ handle->p->update(sample);
+ return ATOM_OK;
+ }
+ else
+ {
+ return enif_make_badarg(env);
+ }
+}
+
+ERL_NIF_TERM histogram_stats(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ histogram_handle* handle;
+ if (enif_get_resource(env,argv[0],histogram_RESOURCE,(void**)&handle))
+ {
+ std::vector<double> percentiles;
+ percentiles.push_back(0.950);
+ percentiles.push_back(0.990);
+ percentiles.push_back(0.999);
+ std::vector<double> scores(handle->p->percentiles(percentiles));
+ return enif_make_list7(env,
+ STAT_TUPLE(ATOM_MIN, handle->p->min()),
+ STAT_TUPLE(ATOM_MAX, handle->p->max()),
+ STAT_TUPLE(ATOM_MEAN, handle->p->mean()),
+ STAT_TUPLE(ATOM_COUNT, handle->p->count()),
+ STAT_TUPLE(ATOM_P95, scores[0]),
+ STAT_TUPLE(ATOM_P99, scores[1]),
+ STAT_TUPLE(ATOM_P999, scores[2]));
+ }
+ else
+ return enif_make_badarg(env);
+}
+
+ERL_NIF_TERM meter_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ meter_handle *handle =
+ (meter_handle *)enif_alloc_resource(meter_RESOURCE,
+ sizeof(meter_handle));
+ memset(handle, '\0', sizeof(meter_handle));
+ handle->p = new meter<>;
+ ERL_NIF_TERM result = enif_make_resource(env, handle);
+ enif_release_resource(handle);
+ return enif_make_tuple2(env, ATOM_OK, result);
+}
+
+ERL_NIF_TERM meter_tick(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ meter_handle *handle;
+ if (enif_get_resource(env,argv[0],meter_RESOURCE,(void**)&handle))
+ {
+ handle->p->tick();
+ return ATOM_OK;
+ }
+ else
+ {
+ return enif_make_badarg(env);
+ }
+}
+
+ERL_NIF_TERM meter_update(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ meter_handle* handle;
+ unsigned int sample;
+ if (enif_get_resource(env,argv[0],meter_RESOURCE,(void**)&handle) &&
+ enif_get_uint(env, argv[1], &sample))
+ {
+ handle->p->mark(sample);
+ return ATOM_OK;
+ }
+ else
+ {
+ return enif_make_badarg(env);
+ }
+}
+
+ERL_NIF_TERM meter_stats(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ meter_handle* handle;
+ if (enif_get_resource(env,argv[0],meter_RESOURCE,(void**)&handle))
+ {
+ return enif_make_list3(env,
+ enif_make_tuple2(env,ATOM_ONE,
+ enif_make_double(env,handle->p->one())),
+ enif_make_tuple2(env,ATOM_FIVE,enif_make_double(env, handle->p->five())),
+ enif_make_tuple2(env,ATOM_FIFTEEN,enif_make_double(env, handle->p->fifteen())));
+ }
+ else
+ return enif_make_badarg(env);
+}
+
+static void histogram_resource_cleanup(ErlNifEnv* env, void* arg)
+{
+ histogram_handle* handle = (histogram_handle*)arg;
+ delete handle->p;
+}
+
+static void meter_resource_cleanup(ErlNifEnv* env, void* arg)
+{
+ meter_handle* handle = (meter_handle*)arg;
+ delete handle->p;
+}
+
+static void counter_resource_cleanup(ErlNifEnv* env, void* arg)
+{
+ counter_handle* handle = (counter_handle*)arg;
+ delete handle->p;
+}
+
+static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ ErlNifResourceFlags flags = (ErlNifResourceFlags)
+ (ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER);
+ histogram_RESOURCE = enif_open_resource_type(env,
+ NULL,
+ "histogram_resource",
+ &histogram_resource_cleanup,
+ flags,
+ NULL);
+ counter_RESOURCE = enif_open_resource_type(env,
+ NULL,
+ "counter_resource",
+ &counter_resource_cleanup,
+ flags,
+ NULL);
+ meter_RESOURCE = enif_open_resource_type(env,
+ NULL,
+ "meter_resource",
+ &meter_resource_cleanup,
+ flags,
+ NULL);
+ // Initialize common atoms
+ ATOM(ATOM_OK, "ok");
+ ATOM(ATOM_ERROR, "error");
+ ATOM(ATOM_TRUE, "true");
+ ATOM(ATOM_FALSE, "false");
+ ATOM(ATOM_NOT_FOUND, "not_found");
+ ATOM(ATOM_MIN, "min");
+ ATOM(ATOM_MAX, "max");
+ ATOM(ATOM_MEAN, "mean");
+ ATOM(ATOM_MEDIAN, "median");
+ ATOM(ATOM_COUNT, "count");
+ ATOM(ATOM_P95, "p95");
+ ATOM(ATOM_P99, "p99");
+ ATOM(ATOM_P999, "p999");
+ ATOM(ATOM_ONE, "one");
+ ATOM(ATOM_FIVE, "five");
+ ATOM(ATOM_FIFTEEN, "fifteen");
+ return 0;
+}
+
+extern "C" {
+ ERL_NIF_INIT(basho_metrics_nifs, nif_funcs, &on_load, NULL, NULL, NULL);
+}
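
The counter NIF above is a thin wrapper over the bundled boost::atomic_uint64_t: counter_increment applies ++ to the value, and counter_value hands it back through enif_make_uint64. Below is a minimal standalone sketch of that underlying primitive; it is not part of this commit, and it assumes the bundled c_src/boost and c_src/boost.atomic directories are on the compiler's include path.

    // Sketch only: the same increment/read pattern counter_increment and
    // counter_value perform on the NIF resource, without the Erlang plumbing.
    #include <cassert>
    #include <boost/atomic.hpp>   // bundled under c_src/boost.atomic

    int main()
    {
        boost::atomic_uint64_t value(0);   // mirrors `struct counter`

        for (int i = 0; i < 1000; ++i)
            ++value;                       // what counter_increment does

        // counter_value returns this number via enif_make_uint64.
        assert(value.load() == 1000);
        return 0;
    }

The Erlang-side bookkeeping (enif_alloc_resource, enif_release_resource, and the *_resource_cleanup callbacks) is what ties such an object's lifetime to a garbage-collected term.
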
45 c_src/basho_metrics_nifs.h
@@ -0,0 +1,45 @@
+// -------------------------------------------------------------------
+//
+//
+//
+// Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
+//
+// This file is provided to you under the Apache License,
+// Version 2.0 (the "License"); you may not use this file
+// except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+// -------------------------------------------------------------------
+#ifndef INCL_ELEVELDB_H
+#define INCL_ELEVELDB_H
+
+extern "C" {
+
+#include "erl_nif_compat.h"
+
+ERL_NIF_TERM histogram_update(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM histogram_new(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM histogram_stats(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM histogram_clear(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
+ERL_NIF_TERM counter_new(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM counter_increment(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM counter_value(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
+ERL_NIF_TERM meter_new(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM meter_update(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM meter_tick(ErlNifEnv*,int, const ERL_NIF_TERM[]);
+ERL_NIF_TERM meter_stats(ErlNifEnv*, int, const ERL_NIF_TERM[]);
+
+} // extern "C"
+
+#endif // include guard
23 c_src/boost.atomic/LICENSE_1_0.txt
@@ -0,0 +1,23 @@
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
204 c_src/boost.atomic/boost/atomic.hpp
@@ -0,0 +1,204 @@
+#ifndef BOOST_ATOMIC_HPP
+#define BOOST_ATOMIC_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <cstddef>
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/platform.hpp>
+#include <boost/atomic/detail/base.hpp>
+#include <boost/atomic/detail/integral-casts.hpp>
+
+namespace boost {
+
+template<typename T>
+class atomic : public detail::atomic::internal_atomic<T> {
+public:
+ typedef detail::atomic::internal_atomic<T> super;
+
+ atomic() {}
+ explicit atomic(T v) : super(v) {}
+private:
+ atomic(const atomic &);
+ void operator=(const atomic &);
+};
+
+
+template<>
+class atomic<bool> : private detail::atomic::internal_atomic<bool> {
+public:
+ typedef detail::atomic::internal_atomic<bool> super;
+
+ atomic() {}
+ explicit atomic(bool v) : super(v) {}
+
+ using super::load;
+ using super::store;
+ using super::compare_exchange_strong;
+ using super::compare_exchange_weak;
+ using super::exchange;
+ using super::is_lock_free;
+
+ operator bool(void) const volatile {return load();}
+ bool operator=(bool v) volatile {store(v); return v;}
+private:
+ atomic(const atomic &);
+ void operator=(const atomic &);
+};
+
+template<>
+class atomic<void *> : private detail::atomic::internal_atomic<void *, sizeof(void *), int> {
+public:
+ typedef detail::atomic::internal_atomic<void *, sizeof(void *), int> super;
+
+ atomic() {}
+ explicit atomic(void * p) : super(p) {}
+ using super::load;
+ using super::store;
+ using super::compare_exchange_strong;
+ using super::compare_exchange_weak;
+ using super::exchange;
+ using super::is_lock_free;
+
+ operator void *(void) const volatile {return load();}
+ void * operator=(void * v) volatile {store(v); return v;}
+
+private:
+ atomic(const atomic &);
+ void * operator=(const atomic &);
+};
+
+/* FIXME: pointer arithmetic still missing */
+
+template<typename T>
+class atomic<T *> : private detail::atomic::internal_atomic<intptr_t> {
+public:
+ typedef detail::atomic::internal_atomic<intptr_t> super;
+
+ atomic() {}
+ explicit atomic(T * p) : super((intptr_t)p) {}
+
+ T *load(memory_order order=memory_order_seq_cst) const volatile
+ {
+ return (T*)super::load(order);
+ }
+ void store(T *v, memory_order order=memory_order_seq_cst) volatile
+ {
+ super::store((intptr_t)v, order);
+ }
+ bool compare_exchange_strong(
+ T * &expected,
+ T * desired,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ return compare_exchange_strong(expected, desired, order, detail::atomic::calculate_failure_order(order));
+ }
+ bool compare_exchange_weak(
+ T * &expected,
+ T *desired,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ return compare_exchange_weak(expected, desired, order, detail::atomic::calculate_failure_order(order));
+ }
+ bool compare_exchange_weak(
+ T * &expected,
+ T *desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ intptr_t expected_=(intptr_t)expected, desired_=(intptr_t)desired;
+ bool success=super::compare_exchange_weak(expected_, desired_, success_order, failure_order);
+ expected=(T*)expected_;
+ return success;
+ }
+ bool compare_exchange_strong(
+ T * &expected,
+ T *desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ intptr_t expected_=(intptr_t)expected, desired_=(intptr_t)desired;
+ bool success=super::compare_exchange_strong(expected_, desired_, success_order, failure_order);
+ expected=(T*)expected_;
+ return success;
+ }
+ T *exchange(T * replacement, memory_order order=memory_order_seq_cst) volatile
+ {
+ return (T*)super::exchange((intptr_t)replacement, order);
+ }
+ using super::is_lock_free;
+
+ operator T *(void) const volatile {return load();}
+ T * operator=(T * v) volatile {store(v); return v;}
+
+ T * fetch_add(ptrdiff_t diff, memory_order order=memory_order_seq_cst) volatile
+ {
+ return (T*)super::fetch_add(diff*sizeof(T), order);
+ }
+ T * fetch_sub(ptrdiff_t diff, memory_order order=memory_order_seq_cst) volatile
+ {
+ return (T*)super::fetch_sub(diff*sizeof(T), order);
+ }
+
+ T *operator++(void) volatile {return fetch_add(1)+1;}
+ T *operator++(int) volatile {return fetch_add(1);}
+ T *operator--(void) volatile {return fetch_sub(1)-1;}
+ T *operator--(int) volatile {return fetch_sub(1);}
+private:
+ atomic(const atomic &);
+ T * operator=(const atomic &);
+};
+
+class atomic_flag : private atomic<int> {
+public:
+ typedef atomic<int> super;
+ using super::is_lock_free;
+
+ atomic_flag(bool initial_state) : super(initial_state?1:0) {}
+ atomic_flag() {}
+
+ bool test_and_set(memory_order order=memory_order_seq_cst)
+ {
+ return super::exchange(1, order) != 0;
+ }
+ void clear(memory_order order=memory_order_seq_cst)
+ {
+ super::store(0, order);
+ }
+};
+
+typedef atomic<char> atomic_char;
+typedef atomic<unsigned char> atomic_uchar;
+typedef atomic<signed char> atomic_schar;
+typedef atomic<uint8_t> atomic_uint8_t;
+typedef atomic<int8_t> atomic_int8_t;
+typedef atomic<unsigned short> atomic_ushort;
+typedef atomic<short> atomic_short;
+typedef atomic<uint16_t> atomic_uint16_t;
+typedef atomic<int16_t> atomic_int16_t;
+typedef atomic<unsigned int> atomic_uint;
+typedef atomic<int> atomic_int;
+typedef atomic<uint32_t> atomic_uint32_t;
+typedef atomic<int32_t> atomic_int32_t;
+typedef atomic<unsigned long> atomic_ulong;
+typedef atomic<long> atomic_long;
+typedef atomic<uint64_t> atomic_uint64_t;
+typedef atomic<int64_t> atomic_int64_t;
+typedef atomic<unsigned long long> atomic_ullong;
+typedef atomic<long long> atomic_llong;
+typedef atomic<void*> atomic_address;
+typedef atomic<bool> atomic_bool;
+
+static inline void atomic_thread_fence(memory_order order)
+{
+ detail::atomic::platform_atomic_thread_fence<memory_order>(order);
+}
+
+}
+
+#endif
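
atomic_flag, defined at the end of this header, exposes exactly the two operations a spinlock needs. Here is a hedged illustration; the spinlock class and main are hypothetical, not part of this commit, and the bundled headers are assumed to be on the include path.

    // Illustration only: a minimal spinlock built on the atomic_flag above.
    #include <boost/atomic.hpp>

    class spinlock {
    public:
        spinlock() : flag(false) {}            // start unlocked

        void lock()
        {
            // test_and_set returns the previous value; spin until it was clear.
            while (flag.test_and_set(boost::memory_order_acquire)) {}
        }
        void unlock()
        {
            flag.clear(boost::memory_order_release);
        }
    private:
        boost::atomic_flag flag;
    };

    int main()
    {
        spinlock guard;
        int shared = 0;
        guard.lock();      // single-threaded smoke test of the API only
        ++shared;
        guard.unlock();
        return shared == 1 ? 0 : 1;
    }
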
186 c_src/boost.atomic/boost/atomic/detail/base.hpp
@@ -0,0 +1,186 @@
+#ifndef BOOST_DETAIL_ATOMIC_BASE_HPP
+#define BOOST_DETAIL_ATOMIC_BASE_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic/detail/fallback.hpp>
+#include <boost/atomic/detail/builder.hpp>
+#include <boost/atomic/detail/valid_integral_types.hpp>
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+static inline memory_order calculate_failure_order(memory_order order)
+{
+ switch(order) {
+ case memory_order_acq_rel: return memory_order_acquire;
+ case memory_order_release: return memory_order_relaxed;
+ default: return order;
+ }
+}
+
+template<typename T, unsigned short Size=sizeof(T)>
+class platform_atomic : public fallback_atomic<T> {
+public:
+ typedef fallback_atomic<T> super;
+
+ explicit platform_atomic(T v) : super(v) {}
+ platform_atomic() {}
+protected:
+ typedef typename super::integral_type integral_type;
+};
+
+template<typename T, unsigned short Size=sizeof(T)>
+class platform_atomic_integral : public build_atomic_from_exchange<fallback_atomic<T> > {
+public:
+ typedef build_atomic_from_exchange<fallback_atomic<T> > super;
+
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral() {}
+protected:
+ typedef typename super::integral_type integral_type;
+};
+
+template<typename T>
+static inline void platform_atomic_thread_fence(T order)
+{
+ /* FIXME: this does not provide
+ sequential consistency, need one global
+ variable for that... */
+ platform_atomic<int> a;
+ a.exchange(0, order);
+}
+
+template<typename T, unsigned short Size=sizeof(T), typename Int=typename is_integral_type<T>::test>
+class internal_atomic;
+
+template<typename T, unsigned short Size>
+class internal_atomic<T, Size, void> : private detail::atomic::platform_atomic<T> {
+public:
+ typedef detail::atomic::platform_atomic<T> super;
+
+ internal_atomic() {}
+ explicit internal_atomic(T v) : super(v) {}
+
+ operator T(void) const volatile {return load();}
+ T operator=(T v) volatile {store(v); return v;}
+
+ using super::is_lock_free;
+ using super::load;
+ using super::store;
+ using super::exchange;
+
+ bool compare_exchange_strong(
+ T &expected,
+ T desired,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
+ }
+ bool compare_exchange_strong(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+private:
+ internal_atomic(const internal_atomic &);
+ void operator=(const internal_atomic &);
+};
+
+template<typename T, unsigned short Size>
+class internal_atomic<T, Size, int> : private detail::atomic::platform_atomic_integral<T> {
+public:
+ typedef detail::atomic::platform_atomic_integral<T> super;
+ typedef typename super::integral_type integral_type;
+
+ internal_atomic() {}
+ explicit internal_atomic(T v) : super(v) {}
+
+ using super::is_lock_free;
+ using super::load;
+ using super::store;
+ using super::exchange;
+ using super::fetch_add;
+ using super::fetch_sub;
+ using super::fetch_and;
+ using super::fetch_or;
+ using super::fetch_xor;
+
+ operator integral_type(void) const volatile {return load();}
+ integral_type operator=(integral_type v) volatile {store(v); return v;}
+
+ integral_type operator&=(integral_type c) volatile {return fetch_and(c)&c;}
+ integral_type operator|=(integral_type c) volatile {return fetch_or(c)|c;}
+ integral_type operator^=(integral_type c) volatile {return fetch_xor(c)^c;}
+
+ integral_type operator+=(integral_type c) volatile {return fetch_add(c)+c;}
+ integral_type operator-=(integral_type c) volatile {return fetch_sub(c)-c;}
+
+ integral_type operator++(void) volatile {return fetch_add(1)+1;}
+ integral_type operator++(int) volatile {return fetch_add(1);}
+ integral_type operator--(void) volatile {return fetch_sub(1)-1;}
+ integral_type operator--(int) volatile {return fetch_sub(1);}
+
+ bool compare_exchange_strong(
+ integral_type &expected,
+ integral_type desired,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
+ }
+ bool compare_exchange_weak(
+ integral_type &expected,
+ integral_type desired,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
+ }
+ bool compare_exchange_strong(
+ integral_type &expected,
+ integral_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+ bool compare_exchange_weak(
+ integral_type &expected,
+ integral_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return super::compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+private:
+ internal_atomic(const internal_atomic &);
+ void operator=(const internal_atomic &);
+};
+
+}
+}
+}
+
+#endif
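
calculate_failure_order is what lets the single-order compare_exchange overloads above pick a legal failure ordering: acq_rel degrades to acquire, release to relaxed, and everything else passes through unchanged. A small hypothetical check of that mapping, not part of this commit and assuming the bundled headers build standalone:

    // Sketch only: exercising the failure-order mapping defined in base.hpp.
    #include <cassert>
    #include <boost/atomic.hpp>   // pulls in atomic/detail/base.hpp

    int main()
    {
        using boost::detail::atomic::calculate_failure_order;

        assert(calculate_failure_order(boost::memory_order_acq_rel) == boost::memory_order_acquire);
        assert(calculate_failure_order(boost::memory_order_release) == boost::memory_order_relaxed);
        assert(calculate_failure_order(boost::memory_order_seq_cst) == boost::memory_order_seq_cst);
        return 0;
    }
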
405 c_src/boost.atomic/boost/atomic/detail/builder.hpp
@@ -0,0 +1,405 @@
+#ifndef BOOST_DETAIL_ATOMIC_BUILDER_HPP
+#define BOOST_DETAIL_ATOMIC_BUILDER_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/detail/endian.hpp>
+#include <boost/atomic/detail/valid_integral_types.hpp>
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+/*
+given a Base that implements:
+
+- load(memory_order order)
+- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
+
+generates exchange and compare_exchange_strong
+*/
+template<typename Base>
+class build_exchange : public Base {
+public:
+ typedef typename Base::integral_type integral_type;
+
+ using Base::load;
+ using Base::compare_exchange_weak;
+
+ bool compare_exchange_strong(
+ integral_type &expected,
+ integral_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ integral_type expected_save=expected;
+ while(true) {
+ if (compare_exchange_weak(expected, desired, success_order, failure_order)) return true;
+ if (expected_save!=expected) return false;
+ expected=expected_save;
+ }
+ }
+
+ integral_type exchange(integral_type replacement, memory_order order=memory_order_seq_cst) volatile
+ {
+ integral_type o=load(memory_order_relaxed);
+ do {} while(!compare_exchange_weak(o, replacement, order, memory_order_relaxed));
+ return o;
+ }
+
+ build_exchange() {}
+ explicit build_exchange(integral_type i) : Base(i) {}
+};
+
+/*
+given a Base that implements:
+
+- fetch_add_var(integral_type c, memory_order order)
+- fetch_inc(memory_order order)
+- fetch_dec(memory_order order)
+
+creates a fetch_add method that delegates to fetch_inc/fetch_dec if operand
+is constant +1/-1, and uses fetch_add_var otherwise
+
+the intention is to allow optimizing the incredibly common case of +1/-1
+*/
+template<typename Base>
+class build_const_fetch_add : public Base {
+public:
+ typedef typename Base::integral_type integral_type;
+
+ integral_type fetch_add(
+ integral_type c,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ if (__builtin_constant_p(c)) {
+ switch(c) {
+ case -1: return fetch_dec(order);
+ case 1: return fetch_inc(order);
+ }
+ }
+ return fetch_add_var(c, order);
+ }
+
+ build_const_fetch_add() {}
+ explicit build_const_fetch_add(integral_type i) : Base(i) {}
+protected:
+ using Base::fetch_add_var;
+ using Base::fetch_inc;
+ using Base::fetch_dec;
+};
+
+/*
+given a Base that implements:
+
+- load(memory_order order)
+- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
+
+generates a -- not very efficient, but correct -- fetch_add operation
+*/
+template<typename Base>
+class build_fetch_add : public Base {
+public:
+ typedef typename Base::integral_type integral_type;
+
+ using Base::compare_exchange_weak;
+
+ integral_type fetch_add(
+ integral_type c, memory_order order=memory_order_seq_cst) volatile
+ {
+ integral_type o=Base::load(memory_order_relaxed), n;
+ do {n=o+c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
+ return o;
+ }
+
+ build_fetch_add() {}
+ explicit build_fetch_add(integral_type i) : Base(i) {}
+};
+
+/*
+given a Base that implements:
+
+- fetch_add(integral_type c, memory_order order)
+
+generates fetch_sub and post/pre- increment/decrement operators
+*/
+template<typename Base>
+class build_arithmeticops : public Base {
+public:
+ typedef typename Base::integral_type integral_type;
+
+ using Base::fetch_add;
+
+ integral_type fetch_sub(
+ integral_type c,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ return fetch_add(-c, order);
+ }
+
+ build_arithmeticops() {}
+ explicit build_arithmeticops(integral_type i) : Base(i) {}
+};
+
+/*
+given a Base that implements:
+
+- load(memory_order order)
+- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
+
+generates -- not very efficient, but correct -- fetch_and, fetch_or and fetch_xor operators
+*/
+template<typename Base>
+class build_logicops : public Base {
+public:
+ typedef typename Base::integral_type integral_type;
+
+ using Base::compare_exchange_weak;
+ using Base::load;
+
+ integral_type fetch_and(integral_type c, memory_order order=memory_order_seq_cst) volatile
+ {
+ integral_type o=load(memory_order_relaxed), n;
+ do {n=o&c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
+ return o;
+ }
+ integral_type fetch_or(integral_type c, memory_order order=memory_order_seq_cst) volatile
+ {
+ integral_type o=load(memory_order_relaxed), n;
+ do {n=o|c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
+ return o;
+ }
+ integral_type fetch_xor(integral_type c, memory_order order=memory_order_seq_cst) volatile
+ {
+ integral_type o=load(memory_order_relaxed), n;
+ do {n=o^c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
+ return o;
+ }
+
+ build_logicops() {}
+ build_logicops(integral_type i) : Base(i) {}
+};
+
+/*
+given a Base that implements:
+
+- load(memory_order order)
+- store(integral_type i, memory_order order)
+- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
+
+generates the full set of atomic operations for integral types
+*/
+template<typename Base>
+class build_atomic_from_minimal : public build_logicops< build_arithmeticops< build_fetch_add< build_exchange<Base> > > > {
+public:
+ typedef build_logicops< build_arithmeticops< build_fetch_add< build_exchange<Base> > > > super;
+ typedef typename super::integral_type integral_type;
+
+ build_atomic_from_minimal(void) {}
+ build_atomic_from_minimal(typename super::integral_type i) : super(i) {}
+};
+
+/*
+given a Base that implements:
+
+- load(memory_order order)
+- store(integral_type i, memory_order order)
+- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
+- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order)
+- exchange(integral_type replacement, memory_order order)
+- fetch_add_var(integral_type c, memory_order order)
+- fetch_inc(memory_order order)
+- fetch_dec(memory_order order)
+
+generates the full set of atomic operations for integral types
+*/
+template<typename Base>
+class build_atomic_from_typical : public build_logicops< build_arithmeticops< build_const_fetch_add<Base> > > {
+public:
+ typedef build_logicops< build_arithmeticops< build_const_fetch_add<Base> > > super;
+ typedef typename super::integral_type integral_type;
+
+ build_atomic_from_typical(void) {}
+ build_atomic_from_typical(typename super::integral_type i) : super(i) {}
+};
+
+/*
+given a Base that implements:
+
+- load(memory_order order)
+- store(integral_type i, memory_order order)
+- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
+- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order)
+- exchange(integral_type replacement, memory_order order)
+- fetch_add(integral_type c, memory_order order)
+
+generates the full set of atomic operations for integral types
+*/
+template<typename Base>
+class build_atomic_from_add : public build_logicops< build_arithmeticops<Base> > {
+public:
+ typedef build_logicops< build_arithmeticops<Base> > super;
+ typedef typename super::integral_type integral_type;
+
+ build_atomic_from_add(void) {}
+ build_atomic_from_add(typename super::integral_type i) : super(i) {}
+};
+
+/*
+given a Base that implements:
+
+- load(memory_order order)
+- store(integral_type i, memory_order order)
+- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
+- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order)
+- exchange(integral_type replacement, memory_order order)
+
+generates the full set of atomic operations for integral types
+*/
+template<typename Base>
+class build_atomic_from_exchange : public build_logicops< build_arithmeticops< build_fetch_add<Base> > > {
+public:
+ typedef build_logicops< build_arithmeticops< build_fetch_add<Base> > > super;
+ typedef typename super::integral_type integral_type;
+
+ build_atomic_from_exchange(void) {}
+ build_atomic_from_exchange(typename super::integral_type i) : super(i) {}
+};
+
+
+/*
+given a Base that implements:
+
+- compare_exchange_weak()
+
+generates load, store and compare_exchange_weak for a smaller
+data type (e.g. an atomic "byte" embedded into a temporary
+and properly aligned atomic "int").
+*/
+template<typename Base, typename Type>
+class build_base_from_larger_type {
+public:
+ typedef Type integral_type;
+
+ build_base_from_larger_type() {}
+ build_base_from_larger_type(integral_type t) {store(t, memory_order_relaxed);}
+
+ integral_type load(memory_order order=memory_order_seq_cst) const volatile
+ {
+ larger_integral_type v=get_base().load(order);
+ return extract(v);
+ }
+ bool compare_exchange_weak(integral_type &expected,
+ integral_type desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ larger_integral_type expected_;
+ larger_integral_type desired_;
+
+ expected_=get_base().load(memory_order_relaxed);
+ expected_=insert(expected_, expected);
+ desired_=insert(expected_, desired);
+ bool success=get_base().compare_exchange_weak(expected_, desired_, success_order, failure_order);
+ expected=extract(expected_);
+ return success;
+ }
+ void store(integral_type v,
+ memory_order order=memory_order_seq_cst) volatile
+ {
+ larger_integral_type expected, desired;
+ expected=get_base().load(memory_order_relaxed);
+ do {
+ desired=insert(expected, v);
+ } while(!get_base().compare_exchange_weak(expected, desired, order, memory_order_relaxed));
+ }
+
+ bool is_lock_free(void)
+ {
+ return get_base().is_lock_free();
+ }
+private:
+ typedef typename Base::integral_type larger_integral_type;
+
+ const Base &get_base(void) const volatile
+ {
+ intptr_t address=(intptr_t)this;
+ address&=~(sizeof(larger_integral_type)-1);
+ return *reinterpret_cast<const Base *>(address);
+ }
+ Base &get_base(void) volatile
+ {
+ intptr_t address=(intptr_t)this;
+ address&=~(sizeof(larger_integral_type)-1);
+ return *reinterpret_cast<Base *>(address);
+ }
+ unsigned int get_offset(void) const volatile
+ {
+ intptr_t address=(intptr_t)this;
+ address&=(sizeof(larger_integral_type)-1);
+ return address;
+ }
+
+ unsigned int get_shift(void) const volatile
+ {
+#if defined(BOOST_LITTLE_ENDIAN)
+ return get_offset()*8;
+#elif defined(BOOST_BIG_ENDIAN)
+ return (sizeof(larger_integral_type)-sizeof(integral_type)-get_offset())*8;
+#else
+ #error "Unknown endian"
+#endif
+ }
+
+ integral_type extract(larger_integral_type v) const volatile
+ {
+ return v>>get_shift();
+ }
+
+ larger_integral_type insert(larger_integral_type target, integral_type source) const volatile
+ {
+ larger_integral_type tmp=source;
+ larger_integral_type mask=(larger_integral_type)-1;
+
+ mask=~(mask<<(8*sizeof(integral_type)));
+
+ mask=mask<<get_shift();
+ tmp=tmp<<get_shift();
+
+ tmp=(tmp & mask) | (target & ~mask);
+
+ return tmp;
+ }
+
+ integral_type i;
+};
+
+/*
+given a Base that implements:
+
+- compare_exchange_weak()
+
+generates the full set of atomic ops for a smaller
+data type (e.g. an atomic "byte" embedded into a temporary
+and properly aligned atomic "int").
+*/
+template<typename Base, typename Type>
+class build_atomic_from_larger_type : public build_atomic_from_minimal< build_base_from_larger_type<Base, Type> > {
+public:
+ typedef build_atomic_from_minimal< build_base_from_larger_type<Base, Type> > super;
+ //typedef typename super::integral_type integral_type;
+ typedef Type integral_type;
+
+ build_atomic_from_larger_type() {}
+ build_atomic_from_larger_type(integral_type v) : super(v) {}
+};
+
+}
+}
+}
+
+#endif
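
The comments in this header describe the layering: give the templates a Base with only load, store and compare_exchange_weak, and they derive exchange, compare_exchange_strong, fetch_add/fetch_sub and the logic ops. Below is a hedged sketch of that composition using a deliberately trivial, non-thread-safe toy base; the code is hypothetical, not part of this commit, and it assumes boost/memory_order.hpp is among the bundled headers (the truncated file list above does not show it).

    // Illustration only: compose the builders over a toy (NOT thread-safe) base.
    #include <cassert>
    #include <boost/memory_order.hpp>
    #include <boost/atomic/detail/builder.hpp>

    namespace demo {

    class toy_base {
    public:
        typedef int integral_type;

        toy_base() : i(0) {}
        explicit toy_base(int v) : i(v) {}

        int load(boost::memory_order /*order*/ = boost::memory_order_seq_cst) const volatile
        {
            return i;
        }
        void store(int v, boost::memory_order /*order*/ = boost::memory_order_seq_cst) volatile
        {
            i = v;
        }
        bool compare_exchange_weak(int &expected, int desired,
                                   boost::memory_order /*success*/,
                                   boost::memory_order /*failure*/) volatile
        {
            if (i == expected) { i = desired; return true; }
            expected = i;
            return false;
        }
    protected:
        int i;
    };

    // build_atomic_from_minimal derives exchange, compare_exchange_strong,
    // fetch_add/fetch_sub and the logic ops from the three primitives above.
    typedef boost::detail::atomic::build_atomic_from_minimal<toy_base> toy_atomic;

    } // namespace demo

    int main()
    {
        demo::toy_atomic a(40);
        a.fetch_add(2);                       // generated by build_fetch_add
        assert(a.load() == 42);
        assert(a.exchange(7) == 42);          // generated by build_exchange
        assert((a.fetch_or(8) | 8) == 15);    // generated by build_logicops
        assert(a.load() == 15);
        return 0;
    }
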
76 c_src/boost.atomic/boost/atomic/detail/fallback.hpp
@@ -0,0 +1,76 @@
+#ifndef BOOST_DETAIL_ATOMIC_FALLBACK_HPP
+#define BOOST_DETAIL_ATOMIC_FALLBACK_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <string.h>
+#include <boost/smart_ptr/detail/spinlock_pool.hpp>
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+template<typename T>
+class fallback_atomic {
+public:
+ fallback_atomic(void) {}
+ explicit fallback_atomic(const T &t) {memcpy(&i, &t, sizeof(T));}
+
+ void store(const T &t, memory_order order=memory_order_seq_cst) volatile
+ {
+ detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
+ memcpy((void*)&i, &t, sizeof(T));
+ }
+ T load(memory_order /*order*/=memory_order_seq_cst) volatile const
+ {
+ detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
+ T tmp;
+ memcpy(&tmp, (T*)&i, sizeof(T));
+ return tmp;
+ }
+ bool compare_exchange_strong(
+ T &expected,
+ T desired,
+ memory_order /*success_order*/,
+ memory_order /*failure_order*/) volatile
+ {
+ detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
+ if (memcmp((void*)&i, &expected, sizeof(T))==0) {
+ memcpy((void*)&i, &desired, sizeof(T));
+ return true;
+ } else {
+ memcpy(&expected, (void*)&i, sizeof(T));
+ return false;
+ }
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ return compare_exchange_strong(expected, desired, success_order, failure_order);
+ }
+ T exchange(T replacement, memory_order /*order*/=memory_order_seq_cst) volatile
+ {
+ detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
+ T tmp;
+ memcpy(&tmp, (void*)&i, sizeof(T));
+ memcpy((void*)&i, &replacement, sizeof(T));
+ return tmp;
+ }
+ bool is_lock_free(void) const volatile {return false;}
+protected:
+ T i;
+ typedef T integral_type;
+};
+
+}
+}
+}
+
+#endif
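
This fallback is what makes boost::atomic<T> usable for arbitrary trivially-copyable structs: each operation takes a spinlock from the pool and works through memcpy/memcmp, and is_lock_free() reports false. A hedged sketch with a hypothetical 24-byte struct follows; it is not part of this commit and assumes the bundled headers, including boost/smart_ptr/detail/spinlock_pool.hpp, are present even though the file list above is truncated.

    // Illustration only: a struct too large for any lock-free specialization,
    // so boost::atomic<triple> takes the spinlock/memcpy path defined above.
    #include <cassert>
    #include <cstdio>
    #include <boost/atomic.hpp>

    struct triple {
        double a, b, c;   // 24 bytes: no 1/2/4/8-byte integral cast applies
    };

    int main()
    {
        triple init = {1.0, 2.0, 3.0};
        boost::atomic<triple> value(init);

        triple seen = value.load();
        assert(seen.b == 2.0);

        triple next = {4.0, 5.0, 6.0};
        value.store(next);

        // Expected to print 0: the fallback is lock-based, not lock-free.
        std::printf("lock free: %d\n", (int)value.is_lock_free());
        return 0;
    }
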
354 c_src/boost.atomic/boost/atomic/detail/gcc-alpha.hpp
@@ -0,0 +1,354 @@
+#ifndef BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP
+#define BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic/detail/base.hpp>
+#include <boost/atomic/detail/builder.hpp>
+
+/*
+ Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
+ (HP OpenVMS systems documentation) and the alpha reference manual.
+ */
+
+/*
+ NB: The most natural thing would be to write the increment/decrement
+ operators along the following lines:
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%1 \n"
+ "addl %0,1,%0 \n"
+ "stl_c %0,%1 \n"
+ "beq %0,1b\n"
+ : "=&b" (tmp)
+ : "m" (value)
+ : "cc"
+ );
+
+ However according to the comments on the HP website and matching
+ comments in the Linux kernel sources this defies branch prediction,
+ as the cpu assumes that backward branches are always taken; so
+ instead copy the trick from the Linux kernel, introduce a forward
+ branch and back again.
+
+ I have, however, had a hard time measuring the difference between
+ the two versions in microbenchmarks -- I am leaving it in nevertheless
+ as it apparently does not hurt either.
+*/
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+static inline void fence_before(memory_order order)
+{
+ switch(order) {
+ case memory_order_consume:
+ case memory_order_release:
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("mb" ::: "memory");
+ default:;
+ }
+}
+
+static inline void fence_after(memory_order order)
+{
+ switch(order) {
+ case memory_order_acquire:
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("mb" ::: "memory");
+ default:;
+ }
+}
+
+template<>
+inline void platform_atomic_thread_fence(memory_order order)
+{
+ switch(order) {
+ case memory_order_acquire:
+ case memory_order_consume:
+ case memory_order_release:
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("mb" ::: "memory");
+ default:;
+ }
+}
+
+template<typename T>
+class atomic_alpha_32 {
+public:
+ typedef T integral_type;
+ explicit atomic_alpha_32(T v) : i(v) {}
+ atomic_alpha_32() {}
+ T load(memory_order order=memory_order_seq_cst) const volatile
+ {
+ T v=*reinterpret_cast<volatile const int *>(&i);
+ fence_after(order);
+ return v;
+ }
+ void store(T v, memory_order order=memory_order_seq_cst) volatile
+ {
+ fence_before(order);
+ *reinterpret_cast<volatile int *>(&i)=(int)v;
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ fence_before(success_order);
+ int current, success;
+ __asm__ __volatile__(
+ "1: ldl_l %2, %4\n"
+ "cmpeq %2, %0, %3\n"
+ "mov %2, %0\n"
+ "beq %3, 3f\n"
+ "stl_c %1, %4\n"
+ "2:\n"
+
+ ".subsection 2\n"
+ "3: mov %3, %1\n"
+ "br 2b\n"
+ ".previous\n"
+
+ : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
+ : "m" (i)
+ :
+ );
+ if (desired) fence_after(success_order);
+ else fence_after(failure_order);
+ return desired;
+ }
+
+ bool is_lock_free(void) const volatile {return true;}
+protected:
+ inline T fetch_add_var(T c, memory_order order) volatile
+ {
+ fence_before(order);
+ T original, modified;
+ __asm__ __volatile__(
+ "1: ldl_l %0, %2\n"
+ "addl %0, %3, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), "=&r" (modified)
+ : "m" (i), "r" (c)
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_inc(memory_order order) volatile
+ {
+ fence_before(order);
+ int original, modified;
+ __asm__ __volatile__(
+ "1: ldl_l %0, %2\n"
+ "addl %0, 1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), "=&r" (modified)
+ : "m" (i)
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_dec(memory_order order) volatile
+ {
+ fence_before(order);
+ int original, modified;
+ __asm__ __volatile__(
+ "1: ldl_l %0, %2\n"
+ "subl %0, 1, %1\n"
+ "stl_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), "=&r" (modified)
+ : "m" (i)
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+private:
+ T i;
+};
+
+template<typename T>
+class atomic_alpha_64 {
+public:
+ typedef T integral_type;
+ explicit atomic_alpha_64(T v) : i(v) {}
+ atomic_alpha_64() {}
+ T load(memory_order order=memory_order_seq_cst) const volatile
+ {
+ T v=*reinterpret_cast<volatile const T *>(&i);
+ fence_after(order);
+ return v;
+ }
+ void store(T v, memory_order order=memory_order_seq_cst) volatile
+ {
+ fence_before(order);
+ *reinterpret_cast<volatile T *>(&i)=v;
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ fence_before(success_order);
+ int current, success;
+ __asm__ __volatile__(
+ "1: ldq_l %2, %4\n"
+ "cmpeq %2, %0, %3\n"
+ "mov %2, %0\n"
+ "beq %3, 3f\n"
+ "stq_c %1, %4\n"
+ "2:\n"
+
+ ".subsection 2\n"
+ "3: mov %3, %1\n"
+ "br 2b\n"
+ ".previous\n"
+
+ : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
+ : "m" (i)
+ :
+ );
+ if (desired) fence_after(success_order);
+ else fence_after(failure_order);
+ return desired;
+ }
+
+ bool is_lock_free(void) const volatile {return true;}
+protected:
+ inline T fetch_add_var(T c, memory_order order) volatile
+ {
+ fence_before(order);
+ T original, modified;
+ __asm__ __volatile__(
+ "1: ldq_l %0, %2\n"
+ "addq %0, %3, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), "=&r" (modified)
+ : "m" (i), "r" (c)
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_inc(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, modified;
+ __asm__ __volatile__(
+ "1: ldq_l %0, %2\n"
+ "addq %0, 1, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), "=&r" (modified)
+ : "m" (i)
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_dec(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, modified;
+ __asm__ __volatile__(
+ "1: ldq_l %0, %2\n"
+ "subq %0, 1, %1\n"
+ "stq_c %1, %2\n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous\n"
+
+ : "=&r" (original), "=&r" (modified)
+ : "m" (i)
+ :
+ );
+ fence_after(order);
+ return original;
+ }
+private:
+ T i;
+};
+
+template<typename T>
+class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > {
+public:
+ typedef build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > super;
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+template<typename T>
+class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > {
+public:
+ typedef build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > super;
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+template<typename T>
+class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
+public:
+ typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
+
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+template<typename T>
+class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
+public:
+ typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
+
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+}
+}
+}
+
+#endif
299 c_src/boost.atomic/boost/atomic/detail/gcc-armv6+.hpp
@@ -0,0 +1,299 @@
+#ifndef BOOST_DETAIL_ATOMIC_GCC_ARMV6P_HPP
+#define BOOST_DETAIL_ATOMIC_GCC_ARMV6P_HPP
+
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// Copyright (c) 2009 Helge Bahmann
+// Copyright (c) 2009 Phil Endecott
+// ARM Code by Phil Endecott, based on other architectures.
+
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/base.hpp>
+#include <boost/atomic/detail/builder.hpp>
+
+// From the ARM Architecture Reference Manual for architecture v6:
+//
+// LDREX{<cond>} <Rd>, [<Rn>]
+// <Rd> Specifies the destination register for the memory word addressed by <Rn>
+// <Rn> Specifies the register containing the address.
+//
+// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
+// <Rd> Specifies the destination register for the returned status value.
+// 0 if the operation updates memory
+// 1 if the operation fails to update memory
+// <Rm> Specifies the register containing the word to be stored to memory.
+// <Rn> Specifies the register containing the address.
+// Rd must not be the same register as Rm or Rn.
+//
+// ARM v7 is like ARM v6 plus:
+// There are half-word and byte versions of the LDREX and STREX instructions,
+// LDREXH, LDREXB, STREXH and STREXB.
+// There are also double-word versions, LDREXD and STREXD.
+// (Actually it looks like these are available from version 6k onwards.)
+// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
+// I think you can supply an immediate offset to the address.
+//
+// A memory barrier is effected using a "co-processor 15" instruction,
+// though a separate assembler mnemonic is available for it in v7.
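+//
+// As an illustrative sketch of the pattern used throughout this file (register
+// names chosen arbitrarily), an atomic fetch-and-add is built from a
+// LDREX/STREX retry loop along these lines:
+//
+// 1: ldrex r0, [r2]      @ r0 = *addr, and mark the location for exclusive access
+//    add   r1, r0, r3    @ r1 = r0 + value
+//    strex r4, r1, [r2]  @ try to store r1; r4 = 0 on success, 1 on failure
+//    teq   r4, #0
+//    bne   1b            @ exclusivity lost -- reload and retry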
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+
+// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
+// doesn't include all instructions and in particular it doesn't include the co-processor
+// instruction used for the memory barrier or the load-locked/store-conditional
+// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
+// asm blocks with code to temporarily change to ARM mode.
+//
+// You can only change between ARM and Thumb modes when branching using the bx instruction.
+// bx takes an address specified in a register. The least significant bit of the address
+// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
+// A temporary register is needed for the address and is passed as an argument to these
+// macros. It must be one of the "low" registers accessible to Thumb code, specified
+// using the "l" constraint in the asm statement.
+//
+// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
+// instruction set. So in v7 we don't need to change to ARM mode; we can write "universal
+// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
+// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
+// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
+// so they can always be present.
+
+#if defined(__thumb__) && !defined(__ARM_ARCH_7A__)
+// FIXME also other v7 variants.
+#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 1f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "1: "
+#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 1f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "1: "
+
+#else
+// The tmpreg is wasted in this case, which is non-optimal.
+#define BOOST_ATOMIC_ARM_ASM_START(TMPREG)
+#define BOOST_ATOMIC_ARM_ASM_END(TMPREG)
+#endif
+
+
+#if defined(__ARM_ARCH_7A__)
+// FIXME ditto.
+#define BOOST_ATOMIC_ARM_DMB "dmb\n"
+#else
+#define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n"
+#endif
+
+// There is also a "Data Synchronisation Barrier" DSB; this exists in v6 as another co-processor
+// instruction like the above.
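+//
+// For illustration only (assuming Thumb-1 mode and a low scratch register such
+// as r3), the BOOST_ATOMIC_ARM_ASM_START / BOOST_ATOMIC_ARM_DMB /
+// BOOST_ATOMIC_ARM_ASM_END sequence used below expands to roughly:
+//
+//    adr  r3, 1f
+//    bx   r3                        @ branch into an ARM-mode stretch of code
+//    .arm
+//    .align 4
+// 1: mcr  p15, 0, r0, c7, c10, 5    @ the CP15 data memory barrier
+//    adr  r3, 1f + 1                @ low bit set: the destination is Thumb code
+//    bx   r3
+//    .thumb
+//    .align 2
+// 1: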
+
+
+static inline void fence_before(memory_order order)
+{
+ // FIXME I don't understand enough about barriers to know what this should do.
+ switch(order) {
+ case memory_order_release:
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ int brtmp;
+ __asm__ __volatile__ (
+ BOOST_ATOMIC_ARM_ASM_START(%0)
+ BOOST_ATOMIC_ARM_DMB
+ BOOST_ATOMIC_ARM_ASM_END(%0)
+ : "=&l" (brtmp) :: "memory"
+ );
+ default:;
+ }
+}
+
+static inline void fence_after(memory_order order)
+{
+ // FIXME I don't understand enough about barriers to know what this should do.
+ switch(order) {
+ case memory_order_acquire:
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ int brtmp;
+ __asm__ __volatile__ (
+ BOOST_ATOMIC_ARM_ASM_START(%0)
+ BOOST_ATOMIC_ARM_DMB
+ BOOST_ATOMIC_ARM_ASM_END(%0)
+ : "=&l" (brtmp) :: "memory"
+ );
+ case memory_order_consume:
+ __asm__ __volatile__ ("" ::: "memory");
+ default:;
+ }
+}
+
+#undef BOOST_ATOMIC_ARM_DMB
+
+
+template<typename T>
+class atomic_arm_4 {
+public:
+ typedef T integral_type;
+ explicit atomic_arm_4(T v) : i(v) {}
+ atomic_arm_4() {}
+ T load(memory_order order=memory_order_seq_cst) const volatile
+ {
+ T v=const_cast<volatile const T &>(i);
+ fence_after(order);
+ return v;
+ }
+ void store(T v, memory_order order=memory_order_seq_cst) volatile
+ {
+ fence_before(order);
+ const_cast<volatile T &>(i)=v;
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ fence_before(success_order);
+ int success;
+ int tmp;
+ __asm__ __volatile__(
+ BOOST_ATOMIC_ARM_ASM_START(%2)
+ "mov %1, #0\n" // success = 0
+ "ldrex %0, [%3]\n" // expected' = *(&i)
+ "teq %0, %4\n" // flags = expected'==expected
+ "ittt eq\n"
+ "strexeq %2, %5, [%3]\n" // if (flags.equal) *(&i) = desired, tmp = !OK
+ "teqeq %2, #0\n" // if (flags.equal) flags = tmp==0
+ "moveq %1, #1\n" // if (flags.equal) success = 1
+ BOOST_ATOMIC_ARM_ASM_END(%2)
+ : "=&r" (expected), // %0
+ "=&r" (success), // %1
+ "=&l" (tmp) // %2
+ : "r" (&i), // %3
+ "r" (expected), // %4
+ "r" ((int)desired) // %5
+ : "cc"
+ );
+ if (success) fence_after(success_order);
+ else fence_after(failure_order);
+ return success;
+ }
+
+ bool is_lock_free(void) const volatile {return true;}
+protected:
+ inline T fetch_add_var(T c, memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ int tmp2;
+ __asm__ __volatile__(
+ BOOST_ATOMIC_ARM_ASM_START(%2)
+ "1: ldrex %0, [%3]\n" // original = *(&i)
+ "add %1, %0, %4\n" // tmp = original + c
+ "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK
+ "teq %2, #0\n" // flags = tmp2==0
+ "it ne\n"
+ "bne 1b\n" // if (!flags.equal) goto 1
+ BOOST_ATOMIC_ARM_ASM_END(%2)
+ : "=&r" (original), // %0
+ "=&r" (tmp), // %1
+ "=&l" (tmp2) // %2
+ : "r" (&i), // %3
+ "r" (c) // %4
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_inc(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ int tmp2;
+ __asm__ __volatile__(
+ BOOST_ATOMIC_ARM_ASM_START(%2)
+ "1: ldrex %0, [%3]\n" // original = *(&i)
+ "add %1, %0, #1\n" // tmp = original + 1
+ "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK
+ "teq %2, #0\n" // flags = tmp2==0
+ "it ne\n"
+ "bne 1b\n" // if (!flags.equal) goto 1
+ BOOST_ATOMIC_ARM_ASM_END(%2)
+ : "=&r" (original), // %0
+ "=&r" (tmp), // %1
+ "=&l" (tmp2) // %2
+ : "r" (&i) // %3
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_dec(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ int tmp2;
+ __asm__ __volatile__(
+ BOOST_ATOMIC_ARM_ASM_START(%2)
+ "1: ldrex %0, [%3]\n" // original = *(&i)
+ "sub %1, %0, #1\n" // tmp = original - 1
+ "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK
+ "teq %2, #0\n" // flags = tmp2==0
+ "it ne\n"
+ "bne 1b\n" // if (!flags.equal) goto 1
+ BOOST_ATOMIC_ARM_ASM_END(%2)
+ : "=&r" (original), // %0
+ "=&r" (tmp), // %1
+ "=&l" (tmp2) // %2
+ : "r" (&i) // %3
+ : "cc"
+ );
+ fence_after(order);
+ return original;
+ }
+private:
+ T i;
+};
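+
+// Usage sketch (illustrative only, not the actual builder.hpp code): the
+// build_exchange / build_atomic_from_typical layers are expected to derive the
+// stronger operations from compare_exchange_weak by retrying, roughly like:
+//
+//   T exchange(T desired, memory_order order = memory_order_seq_cst) volatile
+//   {
+//       T expected = load(memory_order_relaxed);
+//       while (!compare_exchange_weak(expected, desired, order, memory_order_relaxed))
+//           ; // a spurious STREX failure just loops; expected is refreshed each pass
+//       return expected;
+//   }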
+
+
+// #ifdef _ARM_ARCH_7
+// FIXME TODO can add native byte and halfword version here
+
+
+template<typename T>
+class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_arm_4<T> > > {
+public:
+ typedef build_atomic_from_typical<build_exchange<atomic_arm_4<T> > > super;
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+template<typename T>
+class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> {
+public:
+ typedef build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> super;
+
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+template<typename T>
+class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> {
+public:
+ typedef build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> super;
+
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+
+
+typedef build_exchange<atomic_arm_4<void *> > platform_atomic_address;
+
+}
+}
+}
+
+#undef BOOST_ATOMIC_ARM_ASM_START
+#undef BOOST_ATOMIC_ARM_ASM_END
+
+
+#endif
351 c_src/boost.atomic/boost/atomic/detail/gcc-ppc.hpp
@@ -0,0 +1,351 @@
+#ifndef BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
+#define BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic/detail/base.hpp>
+#include <boost/atomic/detail/builder.hpp>
+
+/*
+ Refer to: Motorola: "Programming Environments Manual for 32-Bit
+ Implementations of the PowerPC Architecture", Appendix E:
+ "Synchronization Programming Examples" for an explanation of what is
+ going on here (can be found on the web at various places by the
+ name "MPCFPE32B.pdf", Google is your friend...)
+ */
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+static inline void fence_before(memory_order order)
+{
+ switch(order) {
+ case memory_order_release:
+ case memory_order_acq_rel:
+#if defined(__powerpc64__)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+ break;
+#endif
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("sync" ::: "memory");
+ default:;
+ }
+}
+
+/* Note on the barrier instructions used by fence_after and
+atomic_thread_fence: the "isync" instruction normally does
+not wait for memory-accessing operations to complete; the
+"trick" is to introduce a conditional branch that formally
+depends on the memory-accessing instruction -- isync waits
+until the branch can be resolved and thus implicitly until
+the memory access completes.
+
+This means that even the load(memory_order_relaxed) path
+includes this branch, although no barrier is required there;
+the alternative would be to omit the branch, but then
+atomic_thread_fence(memory_order_acquire) would have to be
+implemented using "sync" instead of "isync".
+The following simple cost-analysis provides the rationale
+for this decision:
+
+- isync: ~12 cycles
+- sync: ~50 cycles
+- "spurious" branch after load: 1-2 cycles
+- making the right decision: priceless
+
+*/
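+
+/* For illustration only: a load with acquire semantics therefore ends up as a
+   sequence along these lines (register names arbitrary):
+
+       lwz   r3, 0(r4)     # the plain load
+       cmpw  r3, r3        # compare the loaded value against itself ...
+       bne-  1f            # ... a branch that formally depends on the load
+   1:
+       isync               # isync now also waits for the load to complete
+*/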
+
+static inline void fence_after(memory_order order)
+{
+ switch(order) {
+ case memory_order_acquire:
+ case memory_order_acq_rel:
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("isync");
+ case memory_order_consume:
+ __asm__ __volatile__ ("" ::: "memory");
+ default:;
+ }
+}
+
+template<>
+inline void platform_atomic_thread_fence(memory_order order)
+{
+ switch(order) {
+ case memory_order_acquire:
+ __asm__ __volatile__ ("isync" ::: "memory");
+ break;
+ case memory_order_release:
+ case memory_order_acq_rel:
+#if defined(__powerpc64__)
+ __asm__ __volatile__ ("lwsync" ::: "memory");
+ break;
+#endif
+ case memory_order_seq_cst:
+ __asm__ __volatile__ ("sync" ::: "memory");
+ default:;
+ }
+}
+
+
+/* note: the __asm__ constraint "b" instructs gcc to use any register
+except r0; this is required because r0 is not allowed in
+some places. Since I am sometimes unsure whether it is allowed,
+I just play it safe and avoid r0 entirely -- ppc isn't
+exactly register-starved, so this really should not matter :) */
+
+template<typename T>
+class atomic_ppc_32 {
+public:
+ typedef T integral_type;
+ explicit atomic_ppc_32(T v) : i(v) {}
+ atomic_ppc_32() {}
+ T load(memory_order order=memory_order_seq_cst) const volatile
+ {
+ T v=*reinterpret_cast<volatile const T *>(&i);
+ __asm__ __volatile__ (
+ "cmpw %0, %0\n"
+ "bne- 1f\n"
+ "1f:\n"
+ : "+b"(v));
+ fence_after(order);
+ return v;
+ }
+ void store(T v, memory_order order=memory_order_seq_cst) volatile
+ {
+ fence_before(order);
+ *reinterpret_cast<volatile T *>(&i)=v;
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ fence_before(success_order);
+ int success;
+ __asm__ __volatile__(
+ "lwarx %0,0,%2\n"
+ "cmpw %0, %3\n"
+ "bne- 2f\n"
+ "stwcx. %4,0,%2\n"
+ "bne- 2f\n"
+ "addi %1,0,1\n"
+ "1:"
+
+ ".subsection 2\n"
+ "2: addi %1,0,0\n"
+ "b 1b\n"
+ ".previous\n"
+ : "=&b" (expected), "=&b" (success)
+ : "b" (&i), "b" (expected), "b" ((int)desired)
+ );
+ if (success) fence_after(success_order);
+ else fence_after(failure_order);
+ return success;
+ }
+
+ bool is_lock_free(void) const volatile {return true;}
+protected:
+ inline T fetch_add_var(T c, memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ __asm__ __volatile__(
+ "1: lwarx %0,0,%2\n"
+ "add %1,%0,%3\n"
+ "stwcx. %1,0,%2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp)
+ : "b" (&i), "b" (c)
+ : "cc");
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_inc(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ __asm__ __volatile__(
+ "1: lwarx %0,0,%2\n"
+ "addi %1,%0,1\n"
+ "stwcx. %1,0,%2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp)
+ : "b" (&i)
+ : "cc");
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_dec(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ __asm__ __volatile__(
+ "1: lwarx %0,0,%2\n"
+ "addi %1,%0,-1\n"
+ "stwcx. %1,0,%2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp)
+ : "b" (&i)
+ : "cc");
+ fence_after(order);
+ return original;
+ }
+private:
+ T i;
+};
+
+#if defined(__powerpc64__)
+
+#warning Untested code -- please inform me if it works
+
+template<typename T>
+class atomic_ppc_64 {
+public:
+ typedef T integral_type;
+ explicit atomic_ppc_64(T v) : i(v) {}
+ atomic_ppc_64() {}
+ T load(memory_order order=memory_order_seq_cst) const volatile
+ {
+ T v=*reinterpret_cast<volatile const T *>(&i);
+ __asm__ __volatile__ (
+ "cmpw %0, %0\n"
+ "bne- 1f\n"
+ "1f:\n"
+ : "+b"(v));
+ fence_after(order);
+ return v;
+ }
+ void store(T v, memory_order order=memory_order_seq_cst) volatile
+ {
+ fence_before(order);
+ *reinterpret_cast<volatile T *>(&i)=v;
+ }
+ bool compare_exchange_weak(
+ T &expected,
+ T desired,
+ memory_order success_order,
+ memory_order failure_order) volatile
+ {
+ fence_before(success_order);
+ int success;
+ __asm__ __volatile__(
+ "ldarx %0,0,%2\n"
+ "cmpw %0, %3\n"
+ "bne- 2f\n"
+ "stdcx. %4,0,%2\n"
+ "bne- 2f\n"
+ "addi %1,0,1\n"
+ "1:"
+
+ ".subsection 2\n"
+ "2: addi %1,0,0\n"
+ "b 1b\n"
+ ".previous\n"
+ : "=&b" (expected), "=&b" (success)
+ : "b" (&i), "b" (expected), "b" ((int)desired)
+ );
+ if (success) fence_after(success_order);
+ else fence_after(failure_order);
+ return success;
+ }
+
+ bool is_lock_free(void) const volatile {return true;}
+protected:
+ inline T fetch_add_var(T c, memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%2\n"
+ "add %1,%0,%3\n"
+ "stdcx. %1,0,%2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp)
+ : "b" (&i), "b" (c)
+ : "cc");
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_inc(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%2\n"
+ "addi %1,%0,1\n"
+ "stdcx. %1,0,%2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp)
+ : "b" (&i)
+ : "cc");
+ fence_after(order);
+ return original;
+ }
+ inline T fetch_dec(memory_order order) volatile
+ {
+ fence_before(order);
+ T original, tmp;
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%2\n"
+ "addi %1,%0,-1\n"
+ "stdcx. %1,0,%2\n"
+ "bne- 1b\n"
+ : "=&b" (original), "=&b" (tmp)
+ : "b" (&i)
+ : "cc");
+ fence_after(order);
+ return original;
+ }
+private:
+ T i;
+};
+#endif
+
+template<typename T>
+class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_ppc_32<T> > > {
+public:
+ typedef build_atomic_from_typical<build_exchange<atomic_ppc_32<T> > > super;
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+template<typename T>
+class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> {
+public:
+ typedef build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> super;
+
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+template<typename T>
+class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> {
+public:
+ typedef build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> super;
+
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+
+#if defined(__powerpc64__)
+template<typename T>
+class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_ppc_64<T> > > {
+public:
+ typedef build_atomic_from_typical<build_exchange<atomic_ppc_64<T> > > super;
+ explicit platform_atomic_integral(T v) : super(v) {}
+ platform_atomic_integral(void) {}
+};
+#endif
+
+}
+}
+}
+
+#endif
454 c_src/boost.atomic/boost/atomic/detail/gcc-x86.hpp
@@ -0,0 +1,454 @@
+#ifndef BOOST_DETAIL_ATOMIC_GCC_X86_HPP
+#define BOOST_DETAIL_ATOMIC_GCC_X86_HPP
+
+// Copyright (c) 2009 Helge Bahmann
+//
+// Distributed under the Boost Software License, Version 1.0.
+// See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#include <boost/atomic/detail/base.hpp>
+#include <boost/atomic/detail/builder.hpp>
+
+namespace boost {
+namespace detail {
+namespace atomic {
+
+static inline void fence_before(memory_order order)
+{
+ switch(order) {
+ case memory_order_consume:
+ case memory_order_release:
+ case memory_order_acq_rel:
+ case memory_order_seq_cst: