Merge commit 'HEAD@{2}'

commit ce6352e88c22154d53316bae04b0b110db61519b (2 parents: 3fc685e + 59a3cc3)
authored by argv0
Showing with 19 additions and 3,503 deletions.
  1. +3 −65 c_src/basho_metrics_nifs.cpp
  2. +3 −7 c_src/basho_metrics_nifs.h
  3. +0 −23 c_src/boost.atomic/LICENSE_1_0.txt
  4. +0 −204 c_src/boost.atomic/boost/atomic.hpp
  5. +0 −186 c_src/boost.atomic/boost/atomic/detail/base.hpp
  6. +0 −405 c_src/boost.atomic/boost/atomic/detail/builder.hpp
  7. +0 −76 c_src/boost.atomic/boost/atomic/detail/fallback.hpp
  8. +0 −354 c_src/boost.atomic/boost/atomic/detail/gcc-alpha.hpp
  9. +0 −299 c_src/boost.atomic/boost/atomic/detail/gcc-armv6+.hpp
  10. +0 −351 c_src/boost.atomic/boost/atomic/detail/gcc-ppc.hpp
  11. +0 −454 c_src/boost.atomic/boost/atomic/detail/gcc-x86.hpp
  12. +0 −192 c_src/boost.atomic/boost/atomic/detail/generic-cas.hpp
  13. +0 −292 c_src/boost.atomic/boost/atomic/detail/integral-casts.hpp
  14. +0 −131 c_src/boost.atomic/boost/atomic/detail/interlocked.hpp
  15. +0 −169 c_src/boost.atomic/boost/atomic/detail/linux-arm.hpp
  16. +0 −37 c_src/boost.atomic/boost/atomic/detail/valid_integral_types.hpp
  17. +0 −42 c_src/boost.atomic/boost/atomic/platform.hpp
  18. +10 −12 c_src/ewma.hpp
  19. +3 −4 c_src/meter_metric.hpp
  20. +0 −55 src/basho_metric.erl
  21. +0 −1  src/basho_metrics.app.src
  22. +0 −48 src/basho_metrics.erl
  23. +0 −16 src/basho_metrics_app.erl
  24. +0 −17 src/basho_metrics_histogram.erl
  25. +0 −20 src/basho_metrics_meter.erl
  26. +0 −12 src/basho_metrics_nifs.erl
  27. +0 −31 src/basho_metrics_sup.erl
68 c_src/basho_metrics_nifs.cpp
@@ -25,14 +25,8 @@
#include <vector>
static ErlNifResourceType* histogram_RESOURCE;
-static ErlNifResourceType* counter_RESOURCE;
static ErlNifResourceType* meter_RESOURCE;
-struct counter
-{
- boost::atomic_uint64_t value;
-};
-
struct meter_handle
{
meter<> *p;
@@ -43,17 +37,11 @@ struct histogram_handle
histogram<> *p;
};
-struct counter_handle
-{
- counter *p;
-};
-
// Atoms (initialized in on_load)
static ERL_NIF_TERM ATOM_TRUE;
static ERL_NIF_TERM ATOM_FALSE;
static ERL_NIF_TERM ATOM_OK;
static ERL_NIF_TERM ATOM_ERROR;
-static ERL_NIF_TERM ATOM_NOT_FOUND;
static ERL_NIF_TERM ATOM_MIN;
static ERL_NIF_TERM ATOM_MAX;
static ERL_NIF_TERM ATOM_MEAN;
@@ -72,9 +60,6 @@ static ErlNifFunc nif_funcs[] =
{"histogram_update", 2, histogram_update},
{"histogram_stats", 1, histogram_stats},
{"histogram_clear", 1, histogram_clear},
- {"counter_new", 0, counter_new},
- {"counter_increment", 1, counter_increment},
- {"counter_value", 1, counter_value},
{"meter_new", 0, meter_new},
{"meter_update", 2, meter_update},
{"meter_tick", 1, meter_tick},
@@ -84,42 +69,6 @@ static ErlNifFunc nif_funcs[] =
#define ATOM(Id, Value) { Id = enif_make_atom(env, Value); }
#define STAT_TUPLE(Key, Value) enif_make_tuple2(env, Key, enif_make_long(env, Value))
-ERL_NIF_TERM counter_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- counter_handle* handle =
- (counter_handle *)enif_alloc_resource(counter_RESOURCE,
- sizeof(counter_handle));
- memset(handle, '\0', sizeof(counter_handle));
- handle->p = new counter;
- ERL_NIF_TERM result = enif_make_resource(env, handle);
- enif_release_resource(handle);
- return enif_make_tuple2(env, ATOM_OK, result);
-}
-
-ERL_NIF_TERM counter_increment(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- counter_handle* handle;
- if (enif_get_resource(env,argv[0], counter_RESOURCE,(void**)&handle))
- {
- ++handle->p->value;
- return ATOM_OK;
- }
- else
- {
- return enif_make_badarg(env);
- }
-}
-
-ERL_NIF_TERM counter_value(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
-{
- counter_handle* handle;
- if (enif_get_resource(env,argv[0], counter_RESOURCE,(void**)&handle))
- return enif_make_uint64(env, handle->p->value);
- else
- return enif_make_badarg(env);
-}
-
-
ERL_NIF_TERM histogram_new(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
histogram_handle *handle =
@@ -232,7 +181,9 @@ ERL_NIF_TERM meter_stats(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
meter_handle* handle;
if (enif_get_resource(env,argv[0],meter_RESOURCE,(void**)&handle))
{
- return enif_make_list3(env,
+ return enif_make_list4(env,
+ enif_make_tuple2(env,ATOM_COUNT,
+ enif_make_ulong(env, handle->p->count())),
enif_make_tuple2(env,ATOM_ONE,
enif_make_double(env,handle->p->one())),
enif_make_tuple2(env,ATOM_FIVE,enif_make_double(env, handle->p->five())),
@@ -254,12 +205,6 @@ static void meter_resource_cleanup(ErlNifEnv* env, void* arg)
delete handle->p;
}
-static void counter_resource_cleanup(ErlNifEnv* env, void* arg)
-{
- counter_handle* handle = (counter_handle*)arg;
- delete handle->p;
-}
-
static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
{
ErlNifResourceFlags flags = (ErlNifResourceFlags)
@@ -270,12 +215,6 @@ static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
&histogram_resource_cleanup,
flags,
NULL);
- counter_RESOURCE = enif_open_resource_type(env,
- NULL,
- "counter_resource",
- &counter_resource_cleanup,
- flags,
- NULL);
meter_RESOURCE = enif_open_resource_type(env,
NULL,
"meter_resource",
@@ -287,7 +226,6 @@ static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
ATOM(ATOM_ERROR, "error");
ATOM(ATOM_TRUE, "true");
ATOM(ATOM_FALSE, "false");
- ATOM(ATOM_NOT_FOUND, "not_found");
ATOM(ATOM_MIN, "min");
ATOM(ATOM_MAX, "max");
ATOM(ATOM_MEAN, "mean");
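Net effect of the edits in this file: the counter resource type, its three NIFs, and the not_found atom are removed, and meter_stats now returns a four-element list whose first entry carries the meter's count. A minimal sketch of the term shape a caller would now see, assuming ATOM_COUNT is bound to the atom count the same way the other atoms are, with illustrative values (only the count, one, and five tuples appear in the hunk above; anything beyond is elided):

    [{count, 42}, {one, 0.9}, {five, 0.95} | _Rest]

Code that pattern-matched the previous three-element result will need to allow for the extra leading tuple.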
10 c_src/basho_metrics_nifs.h
@@ -1,6 +1,6 @@
// -------------------------------------------------------------------
//
-//
+// basho_metrics: fast performance metrics for Erlang.
//
// Copyright (c) 2011 Basho Technologies, Inc. All Rights Reserved.
//
@@ -19,8 +19,8 @@
// under the License.
//
// -------------------------------------------------------------------
-#ifndef INCL_ELEVELDB_H
-#define INCL_ELEVELDB_H
+#ifndef BASHO_METRICS_NIFS_H_
+#define BASHO_METRICS_NIFS_H_
extern "C" {
@@ -31,10 +31,6 @@ ERL_NIF_TERM histogram_new(ErlNifEnv*, int, const ERL_NIF_TERM[]);
ERL_NIF_TERM histogram_stats(ErlNifEnv*, int, const ERL_NIF_TERM[]);
ERL_NIF_TERM histogram_clear(ErlNifEnv*, int, const ERL_NIF_TERM[]);
-ERL_NIF_TERM counter_new(ErlNifEnv*, int, const ERL_NIF_TERM[]);
-ERL_NIF_TERM counter_increment(ErlNifEnv*, int, const ERL_NIF_TERM[]);
-ERL_NIF_TERM counter_value(ErlNifEnv*, int, const ERL_NIF_TERM[]);
-
ERL_NIF_TERM meter_new(ErlNifEnv*, int, const ERL_NIF_TERM[]);
ERL_NIF_TERM meter_update(ErlNifEnv*, int, const ERL_NIF_TERM[]);
ERL_NIF_TERM meter_tick(ErlNifEnv*,int, const ERL_NIF_TERM[]);
23 c_src/boost.atomic/LICENSE_1_0.txt
@@ -1,23 +0,0 @@
-Boost Software License - Version 1.0 - August 17th, 2003
-
-Permission is hereby granted, free of charge, to any person or organization
-obtaining a copy of the software and accompanying documentation covered by
-this license (the "Software") to use, reproduce, display, distribute,
-execute, and transmit the Software, and to prepare derivative works of the
-Software, and to permit third-parties to whom the Software is furnished to
-do so, all subject to the following:
-
-The copyright notices in the Software and this entire statement, including
-the above license grant, this restriction and the following disclaimer,
-must be included in all copies of the Software, in whole or in part, and
-all derivative works of the Software, unless such copies or derivative
-works are solely in the form of machine-executable object code generated by
-a source language processor.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
-SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
-FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
-ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
204 c_src/boost.atomic/boost/atomic.hpp
@@ -1,204 +0,0 @@
-#ifndef BOOST_ATOMIC_HPP
-#define BOOST_ATOMIC_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <cstddef>
-
-#include <boost/memory_order.hpp>
-#include <boost/atomic/platform.hpp>
-#include <boost/atomic/detail/base.hpp>
-#include <boost/atomic/detail/integral-casts.hpp>
-
-namespace boost {
-
-template<typename T>
-class atomic : public detail::atomic::internal_atomic<T> {
-public:
- typedef detail::atomic::internal_atomic<T> super;
-
- atomic() {}
- explicit atomic(T v) : super(v) {}
-private:
- atomic(const atomic &);
- void operator=(const atomic &);
-};
-
-
-template<>
-class atomic<bool> : private detail::atomic::internal_atomic<bool> {
-public:
- typedef detail::atomic::internal_atomic<bool> super;
-
- atomic() {}
- explicit atomic(bool v) : super(v) {}
-
- using super::load;
- using super::store;
- using super::compare_exchange_strong;
- using super::compare_exchange_weak;
- using super::exchange;
- using super::is_lock_free;
-
- operator bool(void) const volatile {return load();}
- bool operator=(bool v) volatile {store(v); return v;}
-private:
- atomic(const atomic &);
- void operator=(const atomic &);
-};
-
-template<>
-class atomic<void *> : private detail::atomic::internal_atomic<void *, sizeof(void *), int> {
-public:
- typedef detail::atomic::internal_atomic<void *, sizeof(void *), int> super;
-
- atomic() {}
- explicit atomic(void * p) : super(p) {}
- using super::load;
- using super::store;
- using super::compare_exchange_strong;
- using super::compare_exchange_weak;
- using super::exchange;
- using super::is_lock_free;
-
- operator void *(void) const volatile {return load();}
- void * operator=(void * v) volatile {store(v); return v;}
-
-private:
- atomic(const atomic &);
- void * operator=(const atomic &);
-};
-
-/* FIXME: pointer arithmetic still missing */
-
-template<typename T>
-class atomic<T *> : private detail::atomic::internal_atomic<intptr_t> {
-public:
- typedef detail::atomic::internal_atomic<intptr_t> super;
-
- atomic() {}
- explicit atomic(T * p) : super((intptr_t)p) {}
-
- T *load(memory_order order=memory_order_seq_cst) const volatile
- {
- return (T*)super::load(order);
- }
- void store(T *v, memory_order order=memory_order_seq_cst) volatile
- {
- super::store((intptr_t)v, order);
- }
- bool compare_exchange_strong(
- T * &expected,
- T * desired,
- memory_order order=memory_order_seq_cst) volatile
- {
- return compare_exchange_strong(expected, desired, order, detail::atomic::calculate_failure_order(order));
- }
- bool compare_exchange_weak(
- T * &expected,
- T *desired,
- memory_order order=memory_order_seq_cst) volatile
- {
- return compare_exchange_weak(expected, desired, order, detail::atomic::calculate_failure_order(order));
- }
- bool compare_exchange_weak(
- T * &expected,
- T *desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- intptr_t expected_=(intptr_t)expected, desired_=(intptr_t)desired;
- bool success=super::compare_exchange_weak(expected_, desired_, success_order, failure_order);
- expected=(T*)expected_;
- return success;
- }
- bool compare_exchange_strong(
- T * &expected,
- T *desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- intptr_t expected_=(intptr_t)expected, desired_=(intptr_t)desired;
- bool success=super::compare_exchange_strong(expected_, desired_, success_order, failure_order);
- expected=(T*)expected_;
- return success;
- }
- T *exchange(T * replacement, memory_order order=memory_order_seq_cst) volatile
- {
- return (T*)super::exchange((intptr_t)replacement, order);
- }
- using super::is_lock_free;
-
- operator T *(void) const volatile {return load();}
- T * operator=(T * v) volatile {store(v); return v;}
-
- T * fetch_add(ptrdiff_t diff, memory_order order=memory_order_seq_cst) volatile
- {
- return (T*)super::fetch_add(diff*sizeof(T), order);
- }
- T * fetch_sub(ptrdiff_t diff, memory_order order=memory_order_seq_cst) volatile
- {
- return (T*)super::fetch_sub(diff*sizeof(T), order);
- }
-
- T *operator++(void) volatile {return fetch_add(1)+1;}
- T *operator++(int) volatile {return fetch_add(1);}
- T *operator--(void) volatile {return fetch_sub(1)-1;}
- T *operator--(int) volatile {return fetch_sub(1);}
-private:
- atomic(const atomic &);
- T * operator=(const atomic &);
-};
-
-class atomic_flag : private atomic<int> {
-public:
- typedef atomic<int> super;
- using super::is_lock_free;
-
- atomic_flag(bool initial_state) : super(initial_state?1:0) {}
- atomic_flag() {}
-
- bool test_and_set(memory_order order=memory_order_seq_cst)
- {
- return super::exchange(1, order) != 0;
- }
- void clear(memory_order order=memory_order_seq_cst)
- {
- super::store(0, order);
- }
-};
-
-typedef atomic<char> atomic_char;
-typedef atomic<unsigned char> atomic_uchar;
-typedef atomic<signed char> atomic_schar;
-typedef atomic<uint8_t> atomic_uint8_t;
-typedef atomic<int8_t> atomic_int8_t;
-typedef atomic<unsigned short> atomic_ushort;
-typedef atomic<short> atomic_short;
-typedef atomic<uint16_t> atomic_uint16_t;
-typedef atomic<int16_t> atomic_int16_t;
-typedef atomic<unsigned int> atomic_uint;
-typedef atomic<int> atomic_int;
-typedef atomic<uint32_t> atomic_uint32_t;
-typedef atomic<int32_t> atomic_int32_t;
-typedef atomic<unsigned long> atomic_ulong;
-typedef atomic<long> atomic_long;
-typedef atomic<uint64_t> atomic_uint64_t;
-typedef atomic<int64_t> atomic_int64_t;
-typedef atomic<unsigned long long> atomic_ullong;
-typedef atomic<long long> atomic_llong;
-typedef atomic<void*> atomic_address;
-typedef atomic<bool> atomic_bool;
-
-static inline void atomic_thread_fence(memory_order order)
-{
- detail::atomic::platform_atomic_thread_fence<memory_order>(order);
-}
-
-}
-
-#endif
186 c_src/boost.atomic/boost/atomic/detail/base.hpp
@@ -1,186 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_BASE_HPP
-#define BOOST_DETAIL_ATOMIC_BASE_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <boost/atomic/detail/fallback.hpp>
-#include <boost/atomic/detail/builder.hpp>
-#include <boost/atomic/detail/valid_integral_types.hpp>
-
-namespace boost {
-namespace detail {
-namespace atomic {
-
-static inline memory_order calculate_failure_order(memory_order order)
-{
- switch(order) {
- case memory_order_acq_rel: return memory_order_acquire;
- case memory_order_release: return memory_order_relaxed;
- default: return order;
- }
-}
-
-template<typename T, unsigned short Size=sizeof(T)>
-class platform_atomic : public fallback_atomic<T> {
-public:
- typedef fallback_atomic<T> super;
-
- explicit platform_atomic(T v) : super(v) {}
- platform_atomic() {}
-protected:
- typedef typename super::integral_type integral_type;
-};
-
-template<typename T, unsigned short Size=sizeof(T)>
-class platform_atomic_integral : public build_atomic_from_exchange<fallback_atomic<T> > {
-public:
- typedef build_atomic_from_exchange<fallback_atomic<T> > super;
-
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral() {}
-protected:
- typedef typename super::integral_type integral_type;
-};
-
-template<typename T>
-static inline void platform_atomic_thread_fence(T order)
-{
- /* FIXME: this does not provide
- sequential consistency, need one global
- variable for that... */
- platform_atomic<int> a;
- a.exchange(0, order);
-}
-
-template<typename T, unsigned short Size=sizeof(T), typename Int=typename is_integral_type<T>::test>
-class internal_atomic;
-
-template<typename T, unsigned short Size>
-class internal_atomic<T, Size, void> : private detail::atomic::platform_atomic<T> {
-public:
- typedef detail::atomic::platform_atomic<T> super;
-
- internal_atomic() {}
- explicit internal_atomic(T v) : super(v) {}
-
- operator T(void) const volatile {return load();}
- T operator=(T v) volatile {store(v); return v;}
-
- using super::is_lock_free;
- using super::load;
- using super::store;
- using super::exchange;
-
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order order=memory_order_seq_cst) volatile
- {
- return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order order=memory_order_seq_cst) volatile
- {
- return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
- }
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return super::compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return super::compare_exchange_strong(expected, desired, success_order, failure_order);
- }
-private:
- internal_atomic(const internal_atomic &);
- void operator=(const internal_atomic &);
-};
-
-template<typename T, unsigned short Size>
-class internal_atomic<T, Size, int> : private detail::atomic::platform_atomic_integral<T> {
-public:
- typedef detail::atomic::platform_atomic_integral<T> super;
- typedef typename super::integral_type integral_type;
-
- internal_atomic() {}
- explicit internal_atomic(T v) : super(v) {}
-
- using super::is_lock_free;
- using super::load;
- using super::store;
- using super::exchange;
- using super::fetch_add;
- using super::fetch_sub;
- using super::fetch_and;
- using super::fetch_or;
- using super::fetch_xor;
-
- operator integral_type(void) const volatile {return load();}
- integral_type operator=(integral_type v) volatile {store(v); return v;}
-
- integral_type operator&=(integral_type c) volatile {return fetch_and(c)&c;}
- integral_type operator|=(integral_type c) volatile {return fetch_or(c)|c;}
- integral_type operator^=(integral_type c) volatile {return fetch_xor(c)^c;}
-
- integral_type operator+=(integral_type c) volatile {return fetch_add(c)+c;}
- integral_type operator-=(integral_type c) volatile {return fetch_sub(c)-c;}
-
- integral_type operator++(void) volatile {return fetch_add(1)+1;}
- integral_type operator++(int) volatile {return fetch_add(1);}
- integral_type operator--(void) volatile {return fetch_sub(1)-1;}
- integral_type operator--(int) volatile {return fetch_sub(1);}
-
- bool compare_exchange_strong(
- integral_type &expected,
- integral_type desired,
- memory_order order=memory_order_seq_cst) volatile
- {
- return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
- }
- bool compare_exchange_weak(
- integral_type &expected,
- integral_type desired,
- memory_order order=memory_order_seq_cst) volatile
- {
- return super::compare_exchange_strong(expected, desired, order, calculate_failure_order(order));
- }
- bool compare_exchange_strong(
- integral_type &expected,
- integral_type desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return super::compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- bool compare_exchange_weak(
- integral_type &expected,
- integral_type desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return super::compare_exchange_strong(expected, desired, success_order, failure_order);
- }
-private:
- internal_atomic(const internal_atomic &);
- void operator=(const internal_atomic &);
-};
-
-}
-}
-}
-
-#endif
405 c_src/boost.atomic/boost/atomic/detail/builder.hpp
@@ -1,405 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_BUILDER_HPP
-#define BOOST_DETAIL_ATOMIC_BUILDER_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <boost/detail/endian.hpp>
-#include <boost/atomic/detail/valid_integral_types.hpp>
-
-namespace boost {
-namespace detail {
-namespace atomic {
-
-/*
-given a Base that implements:
-
-- load(memory_order order)
-- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
-
-generates exchange and compare_exchange_strong
-*/
-template<typename Base>
-class build_exchange : public Base {
-public:
- typedef typename Base::integral_type integral_type;
-
- using Base::load;
- using Base::compare_exchange_weak;
-
- bool compare_exchange_strong(
- integral_type &expected,
- integral_type desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- integral_type expected_save=expected;
- while(true) {
- if (compare_exchange_weak(expected, desired, success_order, failure_order)) return true;
- if (expected_save!=expected) return false;
- expected=expected_save;
- }
- }
-
- integral_type exchange(integral_type replacement, memory_order order=memory_order_seq_cst) volatile
- {
- integral_type o=load(memory_order_relaxed);
- do {} while(!compare_exchange_weak(o, replacement, order, memory_order_relaxed));
- return o;
- }
-
- build_exchange() {}
- explicit build_exchange(integral_type i) : Base(i) {}
-};
-
-/*
-given a Base that implements:
-
-- fetch_add_var(integral_type c, memory_order order)
-- fetch_inc(memory_order order)
-- fetch_dec(memory_order order)
-
-creates a fetch_add method that delegates to fetch_inc/fetch_dec if operand
-is constant +1/-1, and uses fetch_add_var otherwise
-
-the intention is to allow optimizing the incredibly common case of +1/-1
-*/
-template<typename Base>
-class build_const_fetch_add : public Base {
-public:
- typedef typename Base::integral_type integral_type;
-
- integral_type fetch_add(
- integral_type c,
- memory_order order=memory_order_seq_cst) volatile
- {
- if (__builtin_constant_p(c)) {
- switch(c) {
- case -1: return fetch_dec(order);
- case 1: return fetch_inc(order);
- }
- }
- return fetch_add_var(c, order);
- }
-
- build_const_fetch_add() {}
- explicit build_const_fetch_add(integral_type i) : Base(i) {}
-protected:
- using Base::fetch_add_var;
- using Base::fetch_inc;
- using Base::fetch_dec;
-};
-
-/*
-given a Base that implements:
-
-- load(memory_order order)
-- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
-
-generates a -- not very efficient, but correct -- fetch_add operation
-*/
-template<typename Base>
-class build_fetch_add : public Base {
-public:
- typedef typename Base::integral_type integral_type;
-
- using Base::compare_exchange_weak;
-
- integral_type fetch_add(
- integral_type c, memory_order order=memory_order_seq_cst) volatile
- {
- integral_type o=Base::load(memory_order_relaxed), n;
- do {n=o+c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
- return o;
- }
-
- build_fetch_add() {}
- explicit build_fetch_add(integral_type i) : Base(i) {}
-};
-
-/*
-given a Base that implements:
-
-- fetch_add(integral_type c, memory_order order)
-
-generates fetch_sub and post/pre- increment/decrement operators
-*/
-template<typename Base>
-class build_arithmeticops : public Base {
-public:
- typedef typename Base::integral_type integral_type;
-
- using Base::fetch_add;
-
- integral_type fetch_sub(
- integral_type c,
- memory_order order=memory_order_seq_cst) volatile
- {
- return fetch_add(-c, order);
- }
-
- build_arithmeticops() {}
- explicit build_arithmeticops(integral_type i) : Base(i) {}
-};
-
-/*
-given a Base that implements:
-
-- load(memory_order order)
-- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
-
-generates -- not very efficient, but correct -- fetch_and, fetch_or and fetch_xor operators
-*/
-template<typename Base>
-class build_logicops : public Base {
-public:
- typedef typename Base::integral_type integral_type;
-
- using Base::compare_exchange_weak;
- using Base::load;
-
- integral_type fetch_and(integral_type c, memory_order order=memory_order_seq_cst) volatile
- {
- integral_type o=load(memory_order_relaxed), n;
- do {n=o&c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
- return o;
- }
- integral_type fetch_or(integral_type c, memory_order order=memory_order_seq_cst) volatile
- {
- integral_type o=load(memory_order_relaxed), n;
- do {n=o|c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
- return o;
- }
- integral_type fetch_xor(integral_type c, memory_order order=memory_order_seq_cst) volatile
- {
- integral_type o=load(memory_order_relaxed), n;
- do {n=o^c;} while(!compare_exchange_weak(o, n, order, memory_order_relaxed));
- return o;
- }
-
- build_logicops() {}
- build_logicops(integral_type i) : Base(i) {}
-};
-
-/*
-given a Base that implements:
-
-- load(memory_order order)
-- store(integral_type i, memory_order order)
-- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
-
-generates the full set of atomic operations for integral types
-*/
-template<typename Base>
-class build_atomic_from_minimal : public build_logicops< build_arithmeticops< build_fetch_add< build_exchange<Base> > > > {
-public:
- typedef build_logicops< build_arithmeticops< build_fetch_add< build_exchange<Base> > > > super;
- typedef typename super::integral_type integral_type;
-
- build_atomic_from_minimal(void) {}
- build_atomic_from_minimal(typename super::integral_type i) : super(i) {}
-};
-
-/*
-given a Base that implements:
-
-- load(memory_order order)
-- store(integral_type i, memory_order order)
-- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
-- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order)
-- exchange(integral_type replacement, memory_order order)
-- fetch_add_var(integral_type c, memory_order order)
-- fetch_inc(memory_order order)
-- fetch_dec(memory_order order)
-
-generates the full set of atomic operations for integral types
-*/
-template<typename Base>
-class build_atomic_from_typical : public build_logicops< build_arithmeticops< build_const_fetch_add<Base> > > {
-public:
- typedef build_logicops< build_arithmeticops< build_const_fetch_add<Base> > > super;
- typedef typename super::integral_type integral_type;
-
- build_atomic_from_typical(void) {}
- build_atomic_from_typical(typename super::integral_type i) : super(i) {}
-};
-
-/*
-given a Base that implements:
-
-- load(memory_order order)
-- store(integral_type i, memory_order order)
-- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
-- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order)
-- exchange(integral_type replacement, memory_order order)
-- fetch_add(integral_type c, memory_order order)
-
-generates the full set of atomic operations for integral types
-*/
-template<typename Base>
-class build_atomic_from_add : public build_logicops< build_arithmeticops<Base> > {
-public:
- typedef build_logicops< build_arithmeticops<Base> > super;
- typedef typename super::integral_type integral_type;
-
- build_atomic_from_add(void) {}
- build_atomic_from_add(typename super::integral_type i) : super(i) {}
-};
-
-/*
-given a Base that implements:
-
-- load(memory_order order)
-- store(integral_type i, memory_order order)
-- compare_exchange_weak(integral_type &expected, integral_type desired, memory_order order)
-- compare_exchange_strong(integral_type &expected, integral_type desired, memory_order order)
-- exchange(integral_type replacement, memory_order order)
-
-generates the full set of atomic operations for integral types
-*/
-template<typename Base>
-class build_atomic_from_exchange : public build_logicops< build_arithmeticops< build_fetch_add<Base> > > {
-public:
- typedef build_logicops< build_arithmeticops< build_fetch_add<Base> > > super;
- typedef typename super::integral_type integral_type;
-
- build_atomic_from_exchange(void) {}
- build_atomic_from_exchange(typename super::integral_type i) : super(i) {}
-};
-
-
-/*
-given a Base that implements:
-
-- compare_exchange_weak()
-
-generates load, store and compare_exchange_weak for a smaller
-data type (e.g. an atomic "byte" embedded into a temporary
-and properly aligned atomic "int").
-*/
-template<typename Base, typename Type>
-class build_base_from_larger_type {
-public:
- typedef Type integral_type;
-
- build_base_from_larger_type() {}
- build_base_from_larger_type(integral_type t) {store(t, memory_order_relaxed);}
-
- integral_type load(memory_order order=memory_order_seq_cst) const volatile
- {
- larger_integral_type v=get_base().load(order);
- return extract(v);
- }
- bool compare_exchange_weak(integral_type &expected,
- integral_type desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- larger_integral_type expected_;
- larger_integral_type desired_;
-
- expected_=get_base().load(memory_order_relaxed);
- expected_=insert(expected_, expected);
- desired_=insert(expected_, desired);
- bool success=get_base().compare_exchange_weak(expected_, desired_, success_order, failure_order);
- expected=extract(expected_);
- return success;
- }
- void store(integral_type v,
- memory_order order=memory_order_seq_cst) volatile
- {
- larger_integral_type expected, desired;
- expected=get_base().load(memory_order_relaxed);
- do {
- desired=insert(expected, v);
- } while(!get_base().compare_exchange_weak(expected, desired, order, memory_order_relaxed));
- }
-
- bool is_lock_free(void)
- {
- return get_base().is_lock_free();
- }
-private:
- typedef typename Base::integral_type larger_integral_type;
-
- const Base &get_base(void) const volatile
- {
- intptr_t address=(intptr_t)this;
- address&=~(sizeof(larger_integral_type)-1);
- return *reinterpret_cast<const Base *>(address);
- }
- Base &get_base(void) volatile
- {
- intptr_t address=(intptr_t)this;
- address&=~(sizeof(larger_integral_type)-1);
- return *reinterpret_cast<Base *>(address);
- }
- unsigned int get_offset(void) const volatile
- {
- intptr_t address=(intptr_t)this;
- address&=(sizeof(larger_integral_type)-1);
- return address;
- }
-
- unsigned int get_shift(void) const volatile
- {
-#if defined(BOOST_LITTLE_ENDIAN)
- return get_offset()*8;
-#elif defined(BOOST_BIG_ENDIAN)
- return (sizeof(larger_integral_type)-sizeof(integral_type)-get_offset())*8;
-#else
- #error "Unknown endian"
-#endif
- }
-
- integral_type extract(larger_integral_type v) const volatile
- {
- return v>>get_shift();
- }
-
- larger_integral_type insert(larger_integral_type target, integral_type source) const volatile
- {
- larger_integral_type tmp=source;
- larger_integral_type mask=(larger_integral_type)-1;
-
- mask=~(mask<<(8*sizeof(integral_type)));
-
- mask=mask<<get_shift();
- tmp=tmp<<get_shift();
-
- tmp=(tmp & mask) | (target & ~mask);
-
- return tmp;
- }
-
- integral_type i;
-};
-
-/*
-given a Base that implements:
-
-- compare_exchange_weak()
-
-generates the full set of atomic ops for a smaller
-data type (e.g. an atomic "byte" embedded into a temporary
-and properly aligned atomic "int").
-*/
-template<typename Base, typename Type>
-class build_atomic_from_larger_type : public build_atomic_from_minimal< build_base_from_larger_type<Base, Type> > {
-public:
- typedef build_atomic_from_minimal< build_base_from_larger_type<Base, Type> > super;
- //typedef typename super::integral_type integral_type;
- typedef Type integral_type;
-
- build_atomic_from_larger_type() {}
- build_atomic_from_larger_type(integral_type v) : super(v) {}
-};
-
-}
-}
-}
-
-#endif
76 c_src/boost.atomic/boost/atomic/detail/fallback.hpp
@@ -1,76 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_FALLBACK_HPP
-#define BOOST_DETAIL_ATOMIC_FALLBACK_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <string.h>
-#include <boost/smart_ptr/detail/spinlock_pool.hpp>
-
-namespace boost {
-namespace detail {
-namespace atomic {
-
-template<typename T>
-class fallback_atomic {
-public:
- fallback_atomic(void) {}
- explicit fallback_atomic(const T &t) {memcpy(&i, &t, sizeof(T));}
-
- void store(const T &t, memory_order order=memory_order_seq_cst) volatile
- {
- detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
- memcpy((void*)&i, &t, sizeof(T));
- }
- T load(memory_order /*order*/=memory_order_seq_cst) volatile const
- {
- detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
- T tmp;
- memcpy(&tmp, (T*)&i, sizeof(T));
- return tmp;
- }
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order /*success_order*/,
- memory_order /*failure_order*/) volatile
- {
- detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
- if (memcmp((void*)&i, &expected, sizeof(T))==0) {
- memcpy((void*)&i, &desired, sizeof(T));
- return true;
- } else {
- memcpy(&expected, (void*)&i, sizeof(T));
- return false;
- }
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- T exchange(T replacement, memory_order /*order*/=memory_order_seq_cst) volatile
- {
- detail::spinlock_pool<0>::scoped_lock guard(const_cast<T*>(&i));
- T tmp;
- memcpy(&tmp, (void*)&i, sizeof(T));
- memcpy((void*)&i, &replacement, sizeof(T));
- return tmp;
- }
- bool is_lock_free(void) const volatile {return false;}
-protected:
- T i;
- typedef T integral_type;
-};
-
-}
-}
-}
-
-#endif
354 c_src/boost.atomic/boost/atomic/detail/gcc-alpha.hpp
@@ -1,354 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP
-#define BOOST_DETAIL_ATOMIC_GCC_ALPHA_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <boost/atomic/detail/base.hpp>
-#include <boost/atomic/detail/builder.hpp>
-
-/*
- Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
- (HP OpenVMS systems documentation) and the alpha reference manual.
- */
-
-/*
- NB: The most natural thing would be to write the increment/decrement
- operators along the following lines:
-
- __asm__ __volatile__(
- "1: ldl_l %0,%1 \n"
- "addl %0,1,%0 \n"
- "stl_c %0,%1 \n"
- "beq %0,1b\n"
- : "=&b" (tmp)
- : "m" (value)
- : "cc"
- );
-
- However according to the comments on the HP website and matching
- comments in the Linux kernel sources this defies branch prediction,
- as the cpu assumes that backward branches are always taken; so
- instead copy the trick from the Linux kernel, introduce a forward
- branch and back again.
-
- I have, however, had a hard time measuring the difference between
- the two versions in microbenchmarks -- I am leaving it in nevertheless
- as it apparently does not hurt either.
-*/
-
-namespace boost {
-namespace detail {
-namespace atomic {
-
-static inline void fence_before(memory_order order)
-{
- switch(order) {
- case memory_order_consume:
- case memory_order_release:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- __asm__ __volatile__ ("mb" ::: "memory");
- default:;
- }
-}
-
-static inline void fence_after(memory_order order)
-{
- switch(order) {
- case memory_order_acquire:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- __asm__ __volatile__ ("mb" ::: "memory");
- default:;
- }
-}
-
-template<>
-inline void platform_atomic_thread_fence(memory_order order)
-{
- switch(order) {
- case memory_order_acquire:
- case memory_order_consume:
- case memory_order_release:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- __asm__ __volatile__ ("mb" ::: "memory");
- default:;
- }
-}
-
-template<typename T>
-class atomic_alpha_32 {
-public:
- typedef T integral_type;
- explicit atomic_alpha_32(T v) : i(v) {}
- atomic_alpha_32() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const int *>(&i);
- fence_after(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- fence_before(order);
- *reinterpret_cast<volatile int *>(&i)=(int)v;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- int current, success;
- __asm__ __volatile__(
- "1: ldl_l %2, %4\n"
- "cmpeq %2, %0, %3\n"
- "mov %2, %0\n"
- "beq %3, 3f\n"
- "stl_c %1, %4\n"
- "2:\n"
-
- ".subsection 2\n"
- "3: mov %3, %1\n"
- "br 2b\n"
- ".previous\n"
-
- : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
- : "m" (i)
- :
- );
- if (desired) fence_after(success_order);
- else fence_after(failure_order);
- return desired;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- inline T fetch_add_var(T c, memory_order order) volatile
- {
- fence_before(order);
- T original, modified;
- __asm__ __volatile__(
- "1: ldl_l %0, %2\n"
- "addl %0, %3, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
-
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
-
- : "=&r" (original), "=&r" (modified)
- : "m" (i), "r" (c)
- :
- );
- fence_after(order);
- return original;
- }
- inline T fetch_inc(memory_order order) volatile
- {
- fence_before(order);
- int original, modified;
- __asm__ __volatile__(
- "1: ldl_l %0, %2\n"
- "addl %0, 1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
-
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
-
- : "=&r" (original), "=&r" (modified)
- : "m" (i)
- :
- );
- fence_after(order);
- return original;
- }
- inline T fetch_dec(memory_order order) volatile
- {
- fence_before(order);
- int original, modified;
- __asm__ __volatile__(
- "1: ldl_l %0, %2\n"
- "subl %0, 1, %1\n"
- "stl_c %1, %2\n"
- "beq %1, 2f\n"
-
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
-
- : "=&r" (original), "=&r" (modified)
- : "m" (i)
- :
- );
- fence_after(order);
- return original;
- }
-private:
- T i;
-};
-
-template<typename T>
-class atomic_alpha_64 {
-public:
- typedef T integral_type;
- explicit atomic_alpha_64(T v) : i(v) {}
- atomic_alpha_64() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const T *>(&i);
- fence_after(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- fence_before(order);
- *reinterpret_cast<volatile T *>(&i)=v;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- int current, success;
- __asm__ __volatile__(
- "1: ldq_l %2, %4\n"
- "cmpeq %2, %0, %3\n"
- "mov %2, %0\n"
- "beq %3, 3f\n"
- "stq_c %1, %4\n"
- "2:\n"
-
- ".subsection 2\n"
- "3: mov %3, %1\n"
- "br 2b\n"
- ".previous\n"
-
- : "+&r" (expected), "+&r" (desired), "=&r"(current), "=&r"(success)
- : "m" (i)
- :
- );
- if (desired) fence_after(success_order);
- else fence_after(failure_order);
- return desired;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- inline T fetch_add_var(T c, memory_order order) volatile
- {
- fence_before(order);
- T original, modified;
- __asm__ __volatile__(
- "1: ldq_l %0, %2\n"
- "addq %0, %3, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
-
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
-
- : "=&r" (original), "=&r" (modified)
- : "m" (i), "r" (c)
- :
- );
- fence_after(order);
- return original;
- }
- inline T fetch_inc(memory_order order) volatile
- {
- fence_before(order);
- T original, modified;
- __asm__ __volatile__(
- "1: ldq_l %0, %2\n"
- "addq %0, 1, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
-
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
-
- : "=&r" (original), "=&r" (modified)
- : "m" (i)
- :
- );
- fence_after(order);
- return original;
- }
- inline T fetch_dec(memory_order order) volatile
- {
- fence_before(order);
- T original, modified;
- __asm__ __volatile__(
- "1: ldq_l %0, %2\n"
- "subq %0, 1, %1\n"
- "stq_c %1, %2\n"
- "beq %1, 2f\n"
-
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous\n"
-
- : "=&r" (original), "=&r" (modified)
- : "m" (i)
- :
- );
- fence_after(order);
- return original;
- }
-private:
- T i;
-};
-
-template<typename T>
-class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > {
-public:
- typedef build_atomic_from_typical<build_exchange<atomic_alpha_32<T> > > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > {
-public:
- typedef build_atomic_from_typical<build_exchange<atomic_alpha_64<T> > > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
-public:
- typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
-
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> {
-public:
- typedef build_atomic_from_larger_type<atomic_alpha_32<uint32_t>, T> super;
-
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-}
-}
-}
-
-#endif
299 c_src/boost.atomic/boost/atomic/detail/gcc-armv6+.hpp
@@ -1,299 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_GCC_ARMV6P_HPP
-#define BOOST_DETAIL_ATOMIC_GCC_ARMV6P_HPP
-
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-//
-// Copyright (c) 2009 Helge Bahmann
-// Copyright (c) 2009 Phil Endecott
-// ARM Code by Phil Endecott, based on other architectures.
-
-
-#include <boost/memory_order.hpp>
-#include <boost/atomic/detail/base.hpp>
-#include <boost/atomic/detail/builder.hpp>
-
-// From the ARM Architecture Reference Manual for architecture v6:
-//
-// LDREX{<cond>} <Rd>, [<Rn>]
-// <Rd> Specifies the destination register for the memory word addressed by <Rd>
-// <Rn> Specifies the register containing the address.
-//
-// STREX{<cond>} <Rd>, <Rm>, [<Rn>]
-// <Rd> Specifies the destination register for the returned status value.
-// 0 if the operation updates memory
-// 1 if the operation fails to update memory
-// <Rm> Specifies the register containing the word to be stored to memory.
-// <Rn> Specifies the register containing the address.
-// Rd must not be the same register is Rm or Rn.
-//
-// ARM v7 is like ARM v6 plus:
-// There are half-word and byte versions of the LDREX and STREX instructions,
-// LDREXH, LDREXB, STREXH and STREXB.
-// There are also double-word versions, LDREXD and STREXD.
-// (Actually it looks like these are available from version 6k onwards.)
-// FIXME these are not yet used; should be mostly a matter of copy-and-paste.
-// I think you can supply an immediate offset to the address.
-//
-// A memory barrier is effected using a "co-processor 15" instruction,
-// though a separate assembler mnemonic is available for it in v7.
-
-namespace boost {
-namespace detail {
-namespace atomic {
-
-
-// "Thumb 1" is a subset of the ARM instruction set that uses a 16-bit encoding. It
-// doesn't include all instructions and in particular it doesn't include the co-processor
-// instruction used for the memory barrier or the load-locked/store-conditional
-// instructions. So, if we're compiling in "Thumb 1" mode, we need to wrap all of our
-// asm blocks with code to temporarily change to ARM mode.
-//
-// You can only change between ARM and Thumb modes when branching using the bx instruction.
-// bx takes an address specified in a register. The least significant bit of the address
-// indicates the mode, so 1 is added to indicate that the destination code is Thumb.
-// A temporary register is needed for the address and is passed as an argument to these
-// macros. It must be one of the "low" registers accessible to Thumb code, specified
-// usng the "l" attribute in the asm statement.
-//
-// Architecture v7 introduces "Thumb 2", which does include (almost?) all of the ARM
-// instruction set. So in v7 we don't need to change to ARM mode; we can write "universal
-// assembler" which will assemble to Thumb 2 or ARM code as appropriate. The only thing
-// we need to do to make this "universal" assembler mode work is to insert "IT" instructions
-// to annotate the conditional instructions. These are ignored in other modes (e.g. v6),
-// so they can always be present.
-
-#if defined(__thumb__) && !defined(__ARM_ARCH_7A__)
-// FIXME also other v7 variants.
-#define BOOST_ATOMIC_ARM_ASM_START(TMPREG) "adr " #TMPREG ", 1f\n" "bx " #TMPREG "\n" ".arm\n" ".align 4\n" "1: "
-#define BOOST_ATOMIC_ARM_ASM_END(TMPREG) "adr " #TMPREG ", 1f + 1\n" "bx " #TMPREG "\n" ".thumb\n" ".align 2\n" "1: "
-
-#else
-// The tmpreg is wasted in this case, which is non-optimal.
-#define BOOST_ATOMIC_ARM_ASM_START(TMPREG)
-#define BOOST_ATOMIC_ARM_ASM_END(TMPREG)
-#endif
-
-
-#if defined(__ARM_ARCH_7A__)
-// FIXME ditto.
-#define BOOST_ATOMIC_ARM_DMB "dmb\n"
-#else
-#define BOOST_ATOMIC_ARM_DMB "mcr\tp15, 0, r0, c7, c10, 5\n"
-#endif
-
-// There is also a "Data Synchronisation Barrier" DSB; this exists in v6 as another co-processor
-// instruction like the above.
-
-
-static inline void fence_before(memory_order order)
-{
- // FIXME I don't understand enough about barriers to know what this should do.
- switch(order) {
- case memory_order_release:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- int brtmp;
- __asm__ __volatile__ (
- BOOST_ATOMIC_ARM_ASM_START(%0)
- BOOST_ATOMIC_ARM_DMB
- BOOST_ATOMIC_ARM_ASM_END(%0)
- : "=&l" (brtmp) :: "memory"
- );
- default:;
- }
-}
-
-static inline void fence_after(memory_order order)
-{
- // FIXME I don't understand enough about barriers to know what this should do.
- switch(order) {
- case memory_order_acquire:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- int brtmp;
- __asm__ __volatile__ (
- BOOST_ATOMIC_ARM_ASM_START(%0)
- BOOST_ATOMIC_ARM_DMB
- BOOST_ATOMIC_ARM_ASM_END(%0)
- : "=&l" (brtmp) :: "memory"
- );
- case memory_order_consume:
- __asm__ __volatile__ ("" ::: "memory");
- default:;
- }
-}
-
-#undef BOOST_ATOMIC_ARM_DMB
-
-
-template<typename T>
-class atomic_arm_4 {
-public:
- typedef T integral_type;
- explicit atomic_arm_4(T v) : i(v) {}
- atomic_arm_4() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=const_cast<volatile const T &>(i);
- fence_after(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- fence_before(order);
- const_cast<volatile T &>(i)=v;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- int success;
- int tmp;
- __asm__ __volatile__(
- BOOST_ATOMIC_ARM_ASM_START(%2)
- "mov %1, #0\n" // success = 0
- "ldrex %0, [%3]\n" // expected' = *(&i)
- "teq %0, %4\n" // flags = expected'==expected
- "ittt eq\n"
- "strexeq %2, %5, [%3]\n" // if (flags.equal) *(&i) = desired, tmp = !OK
- "teqeq %2, #0\n" // if (flags.equal) flags = tmp==0
- "moveq %1, #1\n" // if (flags.equal) success = 1
- BOOST_ATOMIC_ARM_ASM_END(%2)
- : "=&r" (expected), // %0
- "=&r" (success), // %1
- "=&l" (tmp) // %2
- : "r" (&i), // %3
- "r" (expected), // %4
- "r" ((int)desired) // %5
- : "cc"
- );
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- return success;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- inline T fetch_add_var(T c, memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- int tmp2;
- __asm__ __volatile__(
- BOOST_ATOMIC_ARM_ASM_START(%2)
- "1: ldrex %0, [%3]\n" // original = *(&i)
- "add %1, %0, %4\n" // tmp = original + c
- "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK
- "teq %2, #0\n" // flags = tmp2==0
- "it ne\n"
- "bne 1b\n" // if (!flags.equal) goto 1
- BOOST_ATOMIC_ARM_ASM_END(%2)
- : "=&r" (original), // %0
- "=&r" (tmp), // %1
- "=&l" (tmp2) // %2
- : "r" (&i), // %3
- "r" (c) // %4
- : "cc"
- );
- fence_after(order);
- return original;
- }
- inline T fetch_inc(memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- int tmp2;
- __asm__ __volatile__(
- BOOST_ATOMIC_ARM_ASM_START(%2)
- "1: ldrex %0, [%3]\n" // original = *(&i)
- "add %1, %0, #1\n" // tmp = original + 1
- "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK
- "teq %2, #0\n" // flags = tmp2==0
- "it ne\n"
- "bne 1b\n" // if (!flags.equal) goto 1
- BOOST_ATOMIC_ARM_ASM_END(%2)
- : "=&r" (original), // %0
- "=&r" (tmp), // %1
- "=&l" (tmp2) // %2
- : "r" (&i) // %3
- : "cc"
- );
- fence_after(order);
- return original;
- }
- inline T fetch_dec(memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- int tmp2;
- __asm__ __volatile__(
- BOOST_ATOMIC_ARM_ASM_START(%2)
- "1: ldrex %0, [%3]\n" // original = *(&i)
- "sub %1, %0, #1\n" // tmp = original - 1
- "strex %2, %1, [%3]\n" // *(&i) = tmp; tmp2 = !OK
- "teq %2, #0\n" // flags = tmp2==0
- "it ne\n"
- "bne 1b\n" // if (!flags.equal) goto 1
- BOOST_ATOMIC_ARM_ASM_END(%2)
- : "=&r" (original), // %0
- "=&r" (tmp), // %1
- "=&l" (tmp2) // %2
- : "r" (&i) // %3
- : "cc"
- );
- fence_after(order);
- return original;
- }
-private:
- T i;
-};
-
-
-// #ifdef _ARM_ARCH_7
-// FIXME TODO can add native byte and halfword version here
-
-
-template<typename T>
-class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_arm_4<T> > > {
-public:
- typedef build_atomic_from_typical<build_exchange<atomic_arm_4<T> > > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> {
-public:
- typedef build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> super;
-
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> {
-public:
- typedef build_atomic_from_larger_type<atomic_arm_4<uint32_t>, T> super;
-
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-
-
-typedef build_exchange<atomic_arm_4<void *> > platform_atomic_address;
-
-}
-}
-}
-
-#undef BOOST_ATOMIC_ARM_ASM_START
-#undef BOOST_ATOMIC_ARM_ASM_END
-
-
-#endif
351 c_src/boost.atomic/boost/atomic/detail/gcc-ppc.hpp
@@ -1,351 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
-#define BOOST_DETAIL_ATOMIC_GCC_PPC_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <boost/atomic/detail/base.hpp>
-#include <boost/atomic/detail/builder.hpp>
-
-/*
- Refer to: Motorola: "Programming Environments Manual for 32-Bit
- Implementations of the PowerPC Architecture", Appendix E:
- "Synchronization Programming Examples" for an explanation of what is
- going on here (can be found on the web at various places by the
- name "MPCFPE32B.pdf", Google is your friend...)
- */
-
-namespace boost {
-namespace detail {
-namespace atomic {
-
-static inline void fence_before(memory_order order)
-{
- switch(order) {
- case memory_order_release:
- case memory_order_acq_rel:
-#if defined(__powerpc64__)
- __asm__ __volatile__ ("lwsync" ::: "memory");
- break;
-#endif
- case memory_order_seq_cst:
- __asm__ __volatile__ ("sync" ::: "memory");
- default:;
- }
-}
-
-/* Note on the barrier instructions used by fence_after and
-atomic_thread_fence: the "isync" instruction normally does
-not wait for memory-accessing operations to complete, the
-"trick" is to introduce a conditional branch that formally
-depends on the memory-accessing instruction -- isync waits
-until the branch can be resolved and thus implicitly until
-the memory access completes.
-
-This means that the load(memory_order_relaxed) instruction
-includes this branch, even though no barrier would be required
-here, but as a consequence atomic_thread_fence(memory_order_acquire)
-would have to be implemented using "sync" instead of "isync".
-The following simple cost-analysis provides the rationale
-for this decision:
-
-- isync: about ~12 cycles
-- sync: about ~50 cycles
-- "spurious" branch after load: 1-2 cycles
-- making the right decision: priceless
-
-*/
-
-static inline void fence_after(memory_order order)
-{
- switch(order) {
- case memory_order_acquire:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- __asm__ __volatile__ ("isync");
- case memory_order_consume:
- __asm__ __volatile__ ("" ::: "memory");
- default:;
- }
-}
-
-template<>
-inline void platform_atomic_thread_fence(memory_order order)
-{
- switch(order) {
- case memory_order_acquire:
- __asm__ __volatile__ ("isync" ::: "memory");
- break;
- case memory_order_release:
- case memory_order_acq_rel:
-#if defined(__powerpc64__)
- __asm__ __volatile__ ("lwsync" ::: "memory");
- break;
-#endif
- case memory_order_seq_cst:
- __asm__ __volatile__ ("sync" ::: "memory");
- default:;
- }
-}
-
-
-/* note: the __asm__ constraint "b" instructs gcc to use any register
-except r0; this is required because r0 is not allowed in
-some places. Since I am sometimes unsure if it is allowed
-or not just play it safe and avoid r0 entirely -- ppc isn't
-exactly register-starved, so this really should not matter :) */
-
-template<typename T>
-class atomic_ppc_32 {
-public:
- typedef T integral_type;
- explicit atomic_ppc_32(T v) : i(v) {}
- atomic_ppc_32() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const T *>(&i);
- __asm__ __volatile__ (
- "cmpw %0, %0\n"
- "bne- 1f\n"
- "1f:\n"
- : "+b"(v));
- fence_after(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- fence_before(order);
- *reinterpret_cast<volatile T *>(&i)=v;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- int success;
- __asm__ __volatile__(
- "lwarx %0,0,%2\n"
- "cmpw %0, %3\n"
- "bne- 2f\n"
- "stwcx. %4,0,%2\n"
- "bne- 2f\n"
- "addi %1,0,1\n"
- "1:"
-
- ".subsection 2\n"
- "2: addi %1,0,0\n"
- "b 1b\n"
- ".previous\n"
- : "=&b" (expected), "=&b" (success)
- : "b" (&i), "b" (expected), "b" ((int)desired)
- );
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- return success;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- inline T fetch_add_var(T c, memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- __asm__ __volatile__(
- "1: lwarx %0,0,%2\n"
- "add %1,%0,%3\n"
- "stwcx. %1,0,%2\n"
- "bne- 1b\n"
- : "=&b" (original), "=&b" (tmp)
- : "b" (&i), "b" (c)
- : "cc");
- fence_after(order);
- return original;
- }
- inline T fetch_inc(memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- __asm__ __volatile__(
- "1: lwarx %0,0,%2\n"
- "addi %1,%0,1\n"
- "stwcx. %1,0,%2\n"
- "bne- 1b\n"
- : "=&b" (original), "=&b" (tmp)
- : "b" (&i)
- : "cc");
- fence_after(order);
- return original;
- }
- inline T fetch_dec(memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- __asm__ __volatile__(
- "1: lwarx %0,0,%2\n"
- "addi %1,%0,-1\n"
- "stwcx. %1,0,%2\n"
- "bne- 1b\n"
- : "=&b" (original), "=&b" (tmp)
- : "b" (&i)
- : "cc");
- fence_after(order);
- return original;
- }
-private:
- T i;
-};
-
-#if defined(__powerpc64__)
-
-#warning Untested code -- please inform me if it works
-
-template<typename T>
-class atomic_ppc_64 {
-public:
- typedef T integral_type;
- explicit atomic_ppc_64(T v) : i(v) {}
- atomic_ppc_64() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const T *>(&i);
- __asm__ __volatile__ (
- "cmpw %0, %0\n"
- "bne- 1f\n"
- "1f:\n"
- : "+b"(v));
- fence_after(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- fence_before(order);
- *reinterpret_cast<volatile T *>(&i)=v;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- int success;
- __asm__ __volatile__(
- "ldarx %0,0,%2\n"
- "cmpw %0, %3\n"
- "bne- 2f\n"
- "stdcx. %4,0,%2\n"
- "bne- 2f\n"
- "addi %1,0,1\n"
- "1:"
-
- ".subsection 2\n"
- "2: addi %1,0,0\n"
- "b 1b\n"
- ".previous\n"
- : "=&b" (expected), "=&b" (success)
- : "b" (&i), "b" (expected), "b" ((int)desired)
- );
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- fence_after(order);
- return success;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- inline T fetch_add_var(T c, memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- __asm__ __volatile__(
- "1: ldarx %0,0,%2\n"
- "add %1,%0,%3\n"
- "stdcx. %1,0,%2\n"
- "bne- 1b\n"
- : "=&b" (original), "=&b" (tmp)
- : "b" (&i), "b" (c)
- : "cc");
- fence_after(order);
- return original;
- }
- inline T fetch_inc(memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- __asm__ __volatile__(
- "1: ldarx %0,0,%2\n"
- "addi %1,%0,1\n"
- "stdcx. %1,0,%2\n"
- "bne- 1b\n"
- : "=&b" (original), "=&b" (tmp)
- : "b" (&i)
- : "cc");
- fence_after(order);
- return original;
- }
- inline T fetch_dec(memory_order order) volatile
- {
- fence_before(order);
- T original, tmp;
- __asm__ __volatile__(
- "1: ldarx %0,0,%2\n"
- "addi %1,%0,-1\n"
- "stdcx. %1,0,%2\n"
- "bne- 1b\n"
- : "=&b" (original), "=&b" (tmp)
- : "b" (&i)
- : "cc");
- fence_after(order);
- return original;
- }
-private:
- T i;
-};
-#endif
-
-template<typename T>
-class platform_atomic_integral<T, 4> : public build_atomic_from_typical<build_exchange<atomic_ppc_32<T> > > {
-public:
- typedef build_atomic_from_typical<build_exchange<atomic_ppc_32<T> > > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class platform_atomic_integral<T, 1>: public build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> {
-public:
- typedef build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> super;
-
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class platform_atomic_integral<T, 2>: public build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> {
-public:
- typedef build_atomic_from_larger_type<atomic_ppc_32<uint32_t>, T> super;
-
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-#if defined(__powerpc64__)
-template<typename T>
-class platform_atomic_integral<T, 8> : public build_atomic_from_typical<build_exchange<atomic_ppc_64<T> > > {
-public:
- typedef build_atomic_from_typical<build_exchange<atomic_ppc_64<T> > > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-#endif
-
-}
-}
-}
-
-#endif
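
Note on the removed PPC backend above: it hand-builds compare_exchange_weak from a lwarx/stwcx. reservation pair plus isync/lwsync/sync fences, and every read-modify-write (fetch_add, fetch_inc, fetch_dec) is a retry loop around that pair. A minimal sketch of the same retry pattern, expressed with C++11 std::atomic rather than inline assembly (illustration only, not code from this commit; the function name is made up):

    #include <atomic>
    #include <cstdint>

    // Sketch: the lwarx/stwcx. retry loop, written against std::atomic.
    // compare_exchange_weak corresponds to a single reservation attempt,
    // so spurious failures are allowed and the loop simply retries.
    inline uint32_t fetch_add_sketch(std::atomic<uint32_t>& a, uint32_t c)
    {
        uint32_t expected = a.load(std::memory_order_relaxed);
        while (!a.compare_exchange_weak(expected, expected + c,
                                        std::memory_order_seq_cst,
                                        std::memory_order_relaxed))
        {
            // 'expected' now holds the value seen by the failed attempt; retry.
        }
        return expected;  // value before the addition, as fetch_add_var returns
    }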
454 c_src/boost.atomic/boost/atomic/detail/gcc-x86.hpp
@@ -1,454 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_GCC_X86_HPP
-#define BOOST_DETAIL_ATOMIC_GCC_X86_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <boost/atomic/detail/base.hpp>
-#include <boost/atomic/detail/builder.hpp>
-
-namespace boost {
-namespace detail {
-namespace atomic {
-
-static inline void fence_before(memory_order order)
-{
- switch(order) {
- case memory_order_consume:
- case memory_order_release:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- __asm__ __volatile__ ("" ::: "memory");
- default:;
- }
-}
-
-static inline void fence_after(memory_order order)
-{
- switch(order) {
- case memory_order_acquire:
- case memory_order_acq_rel:
- case memory_order_seq_cst:
- __asm__ __volatile__ ("" ::: "memory");
- default:;
- }
-}
-
-static inline void full_fence(void)
-{
-#if defined(__amd64__)
- __asm__ __volatile__("mfence" ::: "memory");
-#else
- /* could use mfence iff i686, but it does not appear to matter much */
- __asm__ __volatile__("lock; addl $0, (%%esp)" ::: "memory");
-#endif
-}
-
-static inline void fence_after_load(memory_order order)
-{
- switch(order) {
- case memory_order_seq_cst:
- full_fence();
- case memory_order_acquire:
- case memory_order_acq_rel:
- __asm__ __volatile__ ("" ::: "memory");
- default:;
- }
-}
-
-template<>
-inline void platform_atomic_thread_fence(memory_order order)
-{
- switch(order) {
- case memory_order_seq_cst:
- full_fence();
- case memory_order_acquire:
- case memory_order_consume:
- case memory_order_acq_rel:
- case memory_order_release:
- __asm__ __volatile__ ("" ::: "memory");
- default:;
- }
-}
-
-template<typename T>
-class atomic_x86_8 {
-public:
- explicit atomic_x86_8(T v) : i(v) {}
- atomic_x86_8() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const T *>(&i);
- fence_after_load(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- if (order!=memory_order_seq_cst) {
- fence_before(order);
- *reinterpret_cast<volatile T *>(&i)=v;
- } else {
- exchange(v);
- }
- }
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- T prev=expected;
- __asm__ __volatile__("lock; cmpxchgb %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory");
- bool success=(prev==expected);
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- expected=prev;
- return success;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- T exchange(T r, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("xchgb %0, %1\n" : "=q" (r) : "m"(i), "0" (r) : "memory");
- return r;
- }
- T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("lock; xaddb %0, %1" : "+q" (c), "+m" (i) :: "memory");
- return c;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- typedef T integral_type;
-private:
- T i;
-};
-
-template<typename T>
-class platform_atomic_integral<T, 1> : public build_atomic_from_add<atomic_x86_8<T> > {
-public:
- typedef build_atomic_from_add<atomic_x86_8<T> > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class atomic_x86_16 {
-public:
- explicit atomic_x86_16(T v) : i(v) {}
- atomic_x86_16() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const T *>(&i);
- fence_after_load(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- if (order!=memory_order_seq_cst) {
- fence_before(order);
- *reinterpret_cast<volatile T *>(&i)=v;
- } else {
- exchange(v);
- }
- }
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- T prev=expected;
- __asm__ __volatile__("lock; cmpxchgw %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory");
- bool success=(prev==expected);
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- expected=prev;
- return success;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- T exchange(T r, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("xchgw %0, %1\n" : "=r" (r) : "m"(i), "0" (r) : "memory");
- return r;
- }
- T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("lock; xaddw %0, %1" : "+r" (c), "+m" (i) :: "memory");
- return c;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- typedef T integral_type;
-private:
- T i;
-};
-
-template<typename T>
-class platform_atomic_integral<T, 2> : public build_atomic_from_add<atomic_x86_16<T> > {
-public:
- typedef build_atomic_from_add<atomic_x86_16<T> > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-template<typename T>
-class atomic_x86_32 {
-public:
- explicit atomic_x86_32(T v) : i(v) {}
- atomic_x86_32() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const T *>(&i);
- fence_after_load(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- if (order!=memory_order_seq_cst) {
- fence_before(order);
- *reinterpret_cast<volatile T *>(&i)=v;
- } else {
- exchange(v);
- }
- }
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- T prev=expected;
- __asm__ __volatile__("lock; cmpxchgl %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory");
- bool success=(prev==expected);
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- expected=prev;
- return success;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- T exchange(T r, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("xchgl %0, %1\n" : "=r" (r) : "m"(i), "0" (r) : "memory");
- return r;
- }
- T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("lock; xaddl %0, %1" : "+r" (c), "+m" (i) :: "memory");
- return c;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- typedef T integral_type;
-private:
- T i;
-};
-
-template<typename T>
-class platform_atomic_integral<T, 4> : public build_atomic_from_add<atomic_x86_32<T> > {
-public:
- typedef build_atomic_from_add<atomic_x86_32<T> > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-
-#if defined(__amd64__)
-template<typename T>
-class atomic_x86_64 {
-public:
- explicit atomic_x86_64(T v) : i(v) {}
- atomic_x86_64() {}
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- T v=*reinterpret_cast<volatile const T *>(&i);
- fence_after_load(order);
- return v;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- if (order!=memory_order_seq_cst) {
- fence_before(order);
- *reinterpret_cast<volatile T *>(&i)=v;
- } else {
- exchange(v);
- }
- }
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- fence_before(success_order);
- T prev=expected;
- __asm__ __volatile__("lock; cmpxchgq %1, %2\n" : "=a" (prev) : "q" (desired), "m" (i), "a" (expected) : "memory");
- bool success=(prev==expected);
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- expected=prev;
- return success;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- T exchange(T r, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("xchgq %0, %1\n" : "=r" (r) : "m"(i), "0" (r) : "memory");
- return r;
- }
- T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile
- {
- __asm__ __volatile__("lock; xaddq %0, %1" : "+r" (c), "+m" (i) :: "memory");
- return c;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- typedef T integral_type;
-private:
- T i;
-} __attribute__((aligned(8)));
-
-#elif defined(__i686__)
-
-template<typename T>
-class atomic_x86_64 {
-private:
- typedef atomic_x86_64 this_type;
-public:
- explicit atomic_x86_64(T v) : i(v) {}
- atomic_x86_64() {}
-
- bool compare_exchange_strong(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- long scratch;
- fence_before(success_order);
- T prev=expected;
- /* Make sure ebx is saved and restored properly in case
- this object is compiled as "position independent". Since
- programmers on x86 tend to forget specifying -DPIC or
- similar, always assume PIC.
-
- To make this work uniformly even in the non-PIC case,
- setup register constraints such that ebx can not be
- used by accident e.g. as base address for the variable
- to be modified. Accessing "scratch" should always be okay,
- as it can only be placed on the stack (and therefore
- accessed through ebp or esp only).
-
- In theory, could push/pop ebx onto/off the stack, but movs
- to a prepared stack slot turn out to be faster. */
- __asm__ __volatile__(
- "movl %%ebx, %1\n"
- "movl %2, %%ebx\n"
- "lock; cmpxchg8b 0(%4)\n"
- "movl %1, %%ebx\n"
- : "=A" (prev), "=m" (scratch)
- : "D" ((long)desired), "c" ((long)(desired>>32)), "S" (&i), "0" (prev)
- : "memory");
- bool success=(prev==expected);
- if (success) fence_after(success_order);
- else fence_after(failure_order);
- expected=prev;
- return success;
- }
- bool compare_exchange_weak(
- T &expected,
- T desired,
- memory_order success_order,
- memory_order failure_order) volatile
- {
- return compare_exchange_strong(expected, desired, success_order, failure_order);
- }
- T exchange(T r, memory_order order=memory_order_seq_cst) volatile
- {
- T prev=i;
- do {} while(!compare_exchange_strong(prev, r, order, memory_order_relaxed));
- return prev;
- }
-
- T load(memory_order order=memory_order_seq_cst) const volatile
- {
- /* this is a bit problematic -- there is no other
- way to atomically load a 64 bit value, but of course
- compare_exchange requires write access to the memory
- area */
- T expected=i;
- do { } while(!const_cast<this_type *>(this)->compare_exchange_strong(expected, expected, order, memory_order_relaxed));
- return expected;
- }
- void store(T v, memory_order order=memory_order_seq_cst) volatile
- {
- exchange(v, order);
- }
- T fetch_add(T c, memory_order order=memory_order_seq_cst) volatile
- {
- T expected=i, desired;;
- do {
- desired=expected+c;
- } while(!compare_exchange_strong(expected, desired, order, memory_order_relaxed));
- return expected;
- }
-
- bool is_lock_free(void) const volatile {return true;}
-protected:
- typedef T integral_type;
-private:
- T i;
-} __attribute__((aligned(8))) ;
-
-#endif
-
-#if defined(__amd64__) || defined(__i686__)
-template<typename T>
-class platform_atomic_integral<T, 8> : public build_atomic_from_add<atomic_x86_64<T> >{
-public:
- typedef build_atomic_from_add<atomic_x86_64<T> > super;
- explicit platform_atomic_integral(T v) : super(v) {}
- platform_atomic_integral(void) {}
-};
-#endif
-
-}
-}
-}
-
-#endif
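
Note on the removed x86 backend above: aligned loads and stores are already atomic on x86, seq_cst stores are done through xchg (which carries an implicit lock), read-modify-writes use lock-prefixed xadd/cmpxchg, and on 32-bit i686 the 64-bit type is built entirely out of lock; cmpxchg8b, which is what forces the ebx save/restore dance described in the PIC comment. A hedged sketch of that exchange-via-CAS loop using GCC's __atomic builtins (these builtins postdate the removed code; the function name is invented for illustration):

    #include <cstdint>

    // Sketch: 64-bit exchange built from compare-and-swap, as the i686 path
    // above does by hand. On 32-bit x86 the compiler emits cmpxchg8b and
    // takes care of the ebx/PIC bookkeeping itself.
    inline int64_t exchange64_sketch(int64_t* p, int64_t desired)
    {
        int64_t expected = __atomic_load_n(p, __ATOMIC_RELAXED);
        while (!__atomic_compare_exchange_n(p, &expected, desired,
                                            /*weak=*/false,
                                            __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
        {
            // 'expected' was refreshed with the current value; retry.
        }
        return expected;  // previous contents of *p
    }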
192 c_src/boost.atomic/boost/atomic/detail/generic-cas.hpp
@@ -1,192 +0,0 @@
-#ifndef BOOST_DETAIL_ATOMIC_GENERIC_CAS_HPP
-#define BOOST_DETAIL_ATOMIC_GENERIC_CAS_HPP
-
-// Copyright (c) 2009 Helge Bahmann
-//
-// Distributed under the Boost Software License, Version 1.0.
-// See accompanying file LICENSE_1_0.txt or copy at
-// http://www.boost.org/LICENSE_1_0.txt)
-
-#include <stdint.h>
-
-#include <boost/memory_order.hpp>
-#include <boost/atomic/detail/base.hpp>
-#include <boost/atomic/detail/builder.hpp>
-
-/* fallback implementation for various compilation targets;
-this is *not* efficient, particularly because all operations
-are fully fenced (full memory barriers before and after
-each operation) */
-
-#if defined(__GNUC__)
- namespace boost { namespace detail { namespace atomic {
- static inline int32_t
- fenced_compare_exchange_strong_32(volatile int32_t *ptr, int32_t expected, int32_t desired)
- {
- return __sync_val_compare_and_swap_4(ptr, expected, desired);
- }
- #define BOOST_ATOMIC_HAVE_CAS32 1
-
- #if defined(__amd64__) || defined(__i686__)
- static inline int64_t
- fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
- {
- return __sync_val_compare_and_swap_8(ptr, expected, desired);
- }
- #define BOOST_ATOMIC_HAVE_CAS64 1
- #endif
- }}}
-
-#elif defined(__ICL) || defined(_MSC_VER)
-
- #if defined(_MSC_VER)
- #include <Windows.h>
- #include <intrin.h>
- #endif
-
- namespace boost { namespace detail { namespace atomic {
- static inline int32_t
- fenced_compare_exchange_strong(int32_t *ptr, int32_t expected, int32_t desired)
- {
- return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), desired, expected);
- }
- #define BOOST_ATOMIC_HAVE_CAS32 1
- #if defined(_WIN64)
- static inline int64_t
- fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
- {
- return _InterlockedCompareExchange64(ptr, desired, expected);
- }
- #define BOOST_ATOMIC_HAVE_CAS64 1
- #endif
- }}}
-
-#elif (defined(__ICC) || defined(__ECC))
- namespace boost { namespace detail { namespace atomic {
- static inline int32_t
- fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
- {
- return _InterlockedCompareExchange((void*)ptr, desired, expected);
- }
- #define BOOST_ATOMIC_HAVE_CAS32 1
- #if defined(__x86_64)
- static inline int64_t
- fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
- {
- return cas64<int>(ptr, expected, desired);
- }
- #define BOOST_ATOMIC_HAVE_CAS64 1
- #elif defined(__ECC) //IA-64 version
- static inline int64_t
- fenced_compare_exchange_strong(int64_t *ptr, int64_t expected, int64_t desired)
- {
- return _InterlockedCompareExchange64((void*)ptr, desired, expected);
- }
- #define BOOST_ATOMIC_HAVE_CAS64 1
- #endif
- }}}
-
-#elif (defined(__SUNPRO_CC) && defined(__sparc))
- #include <sys/atomic.h>
- namespace boost { namespace detail { namespace atomic {
- static inline int32_t
- fenced_compare_exchange_strong_32(int32_t *ptr, int32_t expected, int32_t desired)
- {
- return atomic_cas_32((volatile unsigned int*)ptr, expected, desired);
- }
- #define BOOST_ATOMIC_HAVE_CAS32 1
-
- /* FIXME: check for 64 bit mode */
- static inline int64_t
- fenced_compare_exchange_strong_64(int64_t *ptr, int64_t expected, int64_t desired)
- {
- return atomic_cas_64((volatile unsigned long long*)ptr, expected, desired);
- }
- #define BOOST_ATOMIC_HAVE_CAS64 1
- }}}
-#endif