// -*- C++ -*-
//===--------------------------- __atomic_locked --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP_ATOMIC_LOCKED
#define _LIBCPP_ATOMIC_LOCKED

#include <__mutex_base> // for mutex and lock_guard

/**
   This header provides slow-but-usable lock-based atomic access to
   types for which lock-free atomic operations are unavailable. It is
   motivated by the desire for 64-bit atomic operations on 32-bit
   PowerPC targets.
**/
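
// A minimal usage sketch (illustrative only; __atomic_mutex_locked is an
// internal class template, not part of the public <atomic> interface):
//
//   __atomic_mutex_locked<long long> __c(0); // e.g. a 64-bit counter on 32-bit PowerPC
//   __c.fetch_add(1);                        // serialized through the object's mutex
//   long long __v = __c.load();              // __v == 1
//
// Every operation on one object takes the same mutex, so accesses are
// thread-safe but never lock-free.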

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
struct __atomic_mutex_locked   // false
{
    mutable _Atomic(_Tp) __a_;
    mutable mutex __lock_;
    typedef lock_guard<mutex> lock_type;

    // Non-atomic view of the stored value; callers must already hold __lock_.
    _Tp& na() const { return reinterpret_cast<_Tp&>(__a_); }
    volatile _Tp& na() const volatile { return reinterpret_cast<volatile _Tp&>(__a_); }

    _LIBCPP_INLINE_VISIBILITY
    bool is_lock_free() const volatile _NOEXCEPT
        {return false;}
    _LIBCPP_INLINE_VISIBILITY
    bool is_lock_free() const _NOEXCEPT
        {return false;}
    _LIBCPP_INLINE_VISIBILITY
    void store(_Tp __d, memory_order = memory_order_seq_cst) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_)); na() = __d; }
    _LIBCPP_INLINE_VISIBILITY
    void store(_Tp __d, memory_order = memory_order_seq_cst) _NOEXCEPT
        { const lock_type __g(__lock_); na() = __d; }
    _LIBCPP_INLINE_VISIBILITY
    _Tp load(memory_order = memory_order_seq_cst) const volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_)); return na(); }
    _LIBCPP_INLINE_VISIBILITY
    _Tp load(memory_order = memory_order_seq_cst) const _NOEXCEPT
        { const lock_type __g(__lock_); return na(); }
    _LIBCPP_INLINE_VISIBILITY
    operator _Tp() const volatile _NOEXCEPT {return load();}
    _LIBCPP_INLINE_VISIBILITY
    operator _Tp() const _NOEXCEPT {return load();}
    _LIBCPP_INLINE_VISIBILITY
    _Tp exchange(_Tp __d, memory_order = memory_order_seq_cst) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_));
          // save the old value, install the new one (or use std::swap)
          const _Tp __ret = na(); na() = __d; return __ret; }
    _LIBCPP_INLINE_VISIBILITY
    _Tp exchange(_Tp __d, memory_order = memory_order_seq_cst) _NOEXCEPT
        { const lock_type __g(__lock_);
          // save the old value, install the new one (or use std::swap)
          const _Tp __ret = na(); na() = __d; return __ret; }
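    // Lock-based compare-and-swap: under the lock, compare the stored value
    // with __e; on a match install __d and report success, otherwise copy
    // the observed value back into __e and report failure. The memory_order
    // arguments are accepted for interface compatibility; the mutex
    // serializes all operations regardless of the requested ordering.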
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __s, memory_order __f) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_));
          if (na() == __e) { na() = __d; return true;  }
          else             { __e = na(); return false; }
        }
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __s, memory_order __f) _NOEXCEPT
        { const lock_type __g(__lock_);
          if (na() == __e) { na() = __d; return true;  }
          else             { __e = na(); return false; }
        }

    // Under a mutex there are no spurious failures, so for now the _weak
    // and _strong forms are indistinguishable.
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __s, memory_order __f) volatile _NOEXCEPT
        {return compare_exchange_weak(__e, __d, __s, __f);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __s, memory_order __f) _NOEXCEPT
        {return compare_exchange_weak(__e, __d, __s, __f);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return compare_exchange_weak(__e, __d, __m, __m);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_weak(_Tp& __e, _Tp __d,
                               memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return compare_exchange_weak(__e, __d, __m, __m);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        {return compare_exchange_strong(__e, __d, __m, __m);}
    _LIBCPP_INLINE_VISIBILITY
    bool compare_exchange_strong(_Tp& __e, _Tp __d,
                                 memory_order __m = memory_order_seq_cst) _NOEXCEPT
        {return compare_exchange_strong(__e, __d, __m, __m);}

    _LIBCPP_INLINE_VISIBILITY
#ifndef _LIBCPP_HAS_NO_DEFAULTED_FUNCTIONS
    __atomic_mutex_locked() _NOEXCEPT = default;
#else
    __atomic_mutex_locked() _NOEXCEPT : __a_() {}
#endif // _LIBCPP_HAS_NO_DEFAULTED_FUNCTIONS

    _LIBCPP_INLINE_VISIBILITY
    _LIBCPP_CONSTEXPR __atomic_mutex_locked(_Tp __d) _NOEXCEPT : __a_(__d) {}
#ifndef _LIBCPP_HAS_NO_DELETED_FUNCTIONS
    __atomic_mutex_locked(const __atomic_mutex_locked&) = delete;
    __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) = delete;
    __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) volatile = delete;
#else  // _LIBCPP_HAS_NO_DELETED_FUNCTIONS
private:
    __atomic_mutex_locked(const __atomic_mutex_locked&);
    __atomic_mutex_locked& operator=(const __atomic_mutex_locked&);
    __atomic_mutex_locked& operator=(const __atomic_mutex_locked&) volatile;
#endif // _LIBCPP_HAS_NO_DELETED_FUNCTIONS
};  // end struct __atomic_mutex_locked

// atomic<Integral>

template <class _Tp>
struct __atomic_mutex_locked<_Tp, true>
    : public __atomic_mutex_locked<_Tp, false>
{
    typedef __atomic_mutex_locked<_Tp, false> __base;
    typedef typename __base::lock_type lock_type;
    using __base::__lock_;
    using __base::na;

    _LIBCPP_INLINE_VISIBILITY
    __atomic_mutex_locked() _NOEXCEPT _LIBCPP_DEFAULT
    _LIBCPP_INLINE_VISIBILITY
    _LIBCPP_CONSTEXPR __atomic_mutex_locked(_Tp __d) _NOEXCEPT : __base(__d) {}

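    // Each fetch_* below is a read-modify-write under the lock: save the
    // current value, apply the operation in place, and return the value
    // observed before the modification.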
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_));
          const _Tp __ret = na(); na() += __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        { const lock_type __g(__lock_);
          const _Tp __ret = na(); na() += __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_));
          const _Tp __ret = na(); na() -= __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        { const lock_type __g(__lock_);
          const _Tp __ret = na(); na() -= __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_));
          const _Tp __ret = na(); na() &= __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        { const lock_type __g(__lock_);
          const _Tp __ret = na(); na() &= __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_));
          const _Tp __ret = na(); na() |= __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        { const lock_type __g(__lock_);
          const _Tp __ret = na(); na() |= __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) volatile _NOEXCEPT
        { const lock_type __g(const_cast<mutex&>(__lock_));
          const _Tp __ret = na(); na() ^= __op; return __ret;
        }
    _LIBCPP_INLINE_VISIBILITY
    _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) _NOEXCEPT
        { const lock_type __g(__lock_);
          const _Tp __ret = na(); na() ^= __op; return __ret;
        }

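    // The increment/decrement and compound-assignment operators are built
    // on fetch_*: the post-forms return the old value directly, while the
    // pre-forms and op= re-apply the operation to return the updated value,
    // e.g. operator+= returns fetch_add(__op) + __op.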
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++(int) volatile _NOEXCEPT {return fetch_add(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++(int) _NOEXCEPT {return fetch_add(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--(int) volatile _NOEXCEPT {return fetch_sub(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--(int) _NOEXCEPT {return fetch_sub(_Tp(1));}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++() volatile _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator++() _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--() volatile _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator--() _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator+=(_Tp __op) volatile _NOEXCEPT {return fetch_add(__op) + __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator+=(_Tp __op) _NOEXCEPT {return fetch_add(__op) + __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator-=(_Tp __op) volatile _NOEXCEPT {return fetch_sub(__op) - __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator-=(_Tp __op) _NOEXCEPT {return fetch_sub(__op) - __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator&=(_Tp __op) volatile _NOEXCEPT {return fetch_and(__op) & __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator&=(_Tp __op) _NOEXCEPT {return fetch_and(__op) & __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator|=(_Tp __op) volatile _NOEXCEPT {return fetch_or(__op) | __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator|=(_Tp __op) _NOEXCEPT {return fetch_or(__op) | __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator^=(_Tp __op) volatile _NOEXCEPT {return fetch_xor(__op) ^ __op;}
    _LIBCPP_INLINE_VISIBILITY
    _Tp operator^=(_Tp __op) _NOEXCEPT {return fetch_xor(__op) ^ __op;}
};

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP_ATOMIC_LOCKED