Permalink
Browse files

atomic: better API, return result of operation

  • Loading branch information...
1 parent 3e0157c commit de04f58edfb3ce00f66d9db85574f529c8589710 @indutny committed Oct 13, 2012
Showing with 24 additions and 17 deletions.
  1. +22 −15 src/atomic.h
  2. +2 −2 src/lring.c
View
@@ -5,16 +5,18 @@
#if defined(__GNUC__)
# if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
-# define ATOMIC_ADD(arg, num) __sync_add_and_fetch(&arg, num)
-# define ATOMIC_SUB(arg, num) __sync_sub_and_fetch(&arg, num)
+# define ATOMIC_ADD(arg, num) __sync_add_and_fetch(arg, num)
+# define ATOMIC_SUB(arg, num) __sync_sub_and_fetch(arg, num)
# elif defined( __i386__ ) || defined( __i486__ ) || defined( __i586__ ) || \
defined( __i686__ ) || defined( __x86_64__ )
-# define ATOMIC_ADD(arg, num) asm volatile ("lock add %0, %1\n" : \
- : "r" (num), "m" (arg))
-# define ATOMIC_SUB(arg, num) asm volatile ("lock sub %0, %1\n" : \
- : "r" (num), "m" (arg))
+# define ATOMIC_ADD(arg, num) ({ \
+      __typeof__(*(arg)) _old = (num); \
+      asm volatile ("lock xadd %0, %1" : "+r" (_old), "+m" (*(arg))); \
+      _old + (num); \
+    })
+# define ATOMIC_SUB(arg, num) ({ \
+      __typeof__(*(arg)) _old = -(num); \
+      asm volatile ("lock xadd %0, %1" : "+r" (_old), "+m" (*(arg))); \
+      _old - (num); \
+    })
# else
@@ -29,28 +31,33 @@
# if defined( __x86_64__ ) || defined ( __ppc64__)
# define ATOMIC_CAST_WORD(arg) ((volatile int64_t*)(arg))
-# define ATOMIC_ADD(arg, num) OSAtomicAdd64(num, ATOMIC_CAST_WORD(&arg));
-# define ATOMIC_SUB(arg, num) OSAtomicAdd64(-num, ATOMIC_CAST_WORD(&arg));
+# define ATOMIC_ADD(arg, num) OSAtomicAdd64((num), ATOMIC_CAST_WORD((arg)))
+# define ATOMIC_SUB(arg, num) OSAtomicAdd64(-(num), ATOMIC_CAST_WORD((arg)))
# else
# define ATOMIC_CAST_WORD(arg) ((volatile int32_t*)(arg))
-# define ATOMIC_ADD(arg, num) OSAtomicAdd32(num, ATOMIC_CAST_WORD(&arg));
-# define ATOMIC_SUB(arg, num) OSAtomicAdd32(-num, ATOMIC_CAST_WORD(&arg));
+# define ATOMIC_ADD(arg, num) OSAtomicAdd32((num), ATOMIC_CAST_WORD((arg)))
+# define ATOMIC_SUB(arg, num) OSAtomicAdd32(-(num), ATOMIC_CAST_WORD((arg)))
# endif
#elif defined(_MSC_VER)
# if defined(_M_X64)
- extern "C" __int64 _InterlockedExchangeAdd64(__int64 volatile* addend, __int64 value);
+ extern "C" __int64 _InterlockedExchangeAdd64(__int64 volatile* addend,
+ __int64 value);
# pragma intrinsic (_InterlockedExchangeAdd64)
-# define ATOMIC_ADD(arg, num) ((void) _InterlockedExchangeAdd64(&(arg), (num)))
-# define ATOMIC_SUB(arg, num) ((void) _InterlockedExchangeAdd64(&(arg), -(num)))
+# define ATOMIC_ADD(arg, num) (_InterlockedExchangeAdd64((arg), (num)) + \
+ (num))
+# define ATOMIC_SUB(arg, num) (_InterlockedExchangeAdd64((arg), -(num)) - \
+ (num))
# elif defined(_M_IX86)
extern "C" long _InterlockedExchangeAdd(long volatile* addend, long value);
# pragma intrinsic (_InterlockedExchangeAdd)
-# define ATOMIC_ADD(arg, num) ((void) _InterlockedExchangeAdd(&(arg), (num)))
-# define ATOMIC_SUB(arg, num) ((void) _InterlockedExchangeAdd(&(arg), -(num)))
+# define ATOMIC_ADD(arg, num) (_InterlockedExchangeAdd((arg), (num)) + \
+ (num))
+# define ATOMIC_SUB(arg, num) (_InterlockedExchangeAdd((arg), -(num)) - \
+ (num))
# else
# error Atomic operations are not supported on your platform
View
@@ -100,7 +100,7 @@ void lring_write(lring_t* ring, const char* data, ssize_t size) {
}
PaUtil_WriteMemoryBarrier();
- ATOMIC_ADD(ring->total, bytes);
+ ATOMIC_ADD(&ring->total, bytes);
}
assert(size == offset);
}
@@ -135,7 +135,7 @@ ssize_t lring_read(lring_t* ring, char* data, ssize_t size) {
roffset = p->roffset + bytes;
p->roffset = roffset;
- ATOMIC_SUB(ring->total, bytes);
+ ATOMIC_SUB(&ring->total, bytes);
assert(roffset >= 0);

0 comments on commit de04f58

Please sign in to comment.