Permalink
Browse files

[libcmyth/refmem] Pull across upstream changes to support other architectures.
  • Loading branch information...
David Teirney
David Teirney committed Sep 24, 2012
1 parent beb2664 commit 09e173e18b17f945b83220cb18800f3f4a0b93d7
Showing with 203 additions and 42 deletions.
  1. +80 −16 lib/cmyth/include/refmem/atomic.h
  2. +32 −0 lib/cmyth/include/refmem/refmem.h
  3. +72 −12 lib/cmyth/librefmem/alloc.c
  4. +19 −14 lib/cmyth/librefmem/refmem_local.h
@@ -24,15 +24,30 @@
#pragma GCC optimization_level 0
#endif
-#ifdef _MSC_VER
+#if defined(_MSC_VER)
#include <windows.h>
+#define inline __inline
#endif
+
+#if defined __mips__
+#include <atomic.h>
+#endif
+
+#if defined(__APPLE__)
+#include <libkern/OSAtomic.h>
+
+typedef volatile int32_t mvp_atomic_t;
+
+#define __mvp_atomic_increment(x) OSAtomicIncrement32(x)
+#define __mvp_atomic_decrement(x) OSAtomicDecrement32(x)
+#else
+typedef volatile unsigned int mvp_atomic_t;
+
/**
 * Atomically increment a reference count variable.
* \param valp address of atomic variable
* \return incremented reference count
*/
-typedef unsigned mvp_atomic_t;
static inline unsigned
__mvp_atomic_increment(mvp_atomic_t *valp)
{
@@ -45,8 +60,8 @@ __mvp_atomic_increment(mvp_atomic_t *valp)
: "r" (valp), "0" (0x1)
: "cc", "memory"
);
-#elif defined __i386__
- asm volatile (".byte 0xf0, 0x0f, 0xc1, 0x02" /*lock; xaddl %eax, (%edx) */
+#elif defined __i386__ || defined __x86_64__
+ __asm__ volatile (".byte 0xf0, 0x0f, 0xc1, 0x02" /*lock; xaddl %eax, (%edx) */
: "=a" (__val)
: "0" (1), "m" (*valp), "d" (valp)
: "memory");
@@ -63,16 +78,35 @@ __mvp_atomic_increment(mvp_atomic_t *valp)
: "r" (valp)
: "cc", "memory");
#elif defined _MSC_VER
- __val = InterlockedIncrement(valp);
-#else
+ __val = InterlockedIncrement(valp);
+#elif defined ANDROID
+ __val = __atomic_inc(valp) + 1;
+#elif defined __arm__ && !defined __thumb__
+ int tmp1, tmp2;
+ int inc = 1;
+ __asm__ __volatile__ (
+ "\n"
+ "0:"
+ "ldr %0, [%3]\n"
+ "add %1, %0, %4\n"
+ "swp %2, %1, [%3]\n"
+ "cmp %0, %2\n"
+ "swpne %0, %2, [%3]\n"
+ "bne 0b\n"
+ : "=&r"(tmp1), "=&r"(__val), "=&r"(tmp2)
+ : "r" (valp), "r"(inc)
+ : "cc", "memory");
+#elif defined __mips__
+ __val = atomic_increment_val(valp);
+#elif defined __GNUC__
/*
* Don't know how to atomic increment for a generic architecture
- * so punt and just increment the value.
+ * so try to use GCC builtin
*/
-#ifdef _WIN32
- #pragma message("unknown architecture, atomic increment is not...");
+ __val = __sync_add_and_fetch(valp,1);
#else
- #warning unknown architecture, atomic increment is not...
+#if !defined(_MSC_VER)
+#warning unknown architecture, atomic increment is not...
#endif
__val = ++(*valp);
#endif
@@ -96,8 +130,8 @@ __mvp_atomic_decrement(mvp_atomic_t *valp)
: "r" (valp), "0" (0x1)
: "cc", "memory"
);
-#elif defined __i386__
- asm volatile (".byte 0xf0, 0x0f, 0xc1, 0x02" /*lock; xaddl %eax, (%edx) */
+#elif defined __i386__ || defined __x86_64__
+ __asm__ volatile (".byte 0xf0, 0x0f, 0xc1, 0x02" /*lock; xaddl %eax, (%edx) */
: "=a" (__val)
: "0" (-1), "m" (*valp), "d" (valp)
: "memory");
@@ -113,6 +147,25 @@ __mvp_atomic_decrement(mvp_atomic_t *valp)
: "=&r" (__val)
: "r" (valp)
: "cc", "memory");
+#elif defined ANDROID
+ __val = __atomic_dec(valp) - 1;
+#elif defined __arm__ && !defined __thumb__
+ int tmp1, tmp2;
+ int inc = -1;
+ __asm__ __volatile__ (
+ "\n"
+ "0:"
+ "ldr %0, [%3]\n"
+ "add %1, %0, %4\n"
+ "swp %2, %1, [%3]\n"
+ "cmp %0, %2\n"
+ "swpne %0, %2, [%3]\n"
+ "bne 0b\n"
+ : "=&r"(tmp1), "=&r"(__val), "=&r"(tmp2)
+ : "r" (valp), "r"(inc)
+ : "cc", "memory");
+#elif defined __mips__
+ __val = atomic_decrement_val(valp);
#elif defined __sparcv9__
mvp_atomic_t __newval, __oldval = (*valp);
do
@@ -126,17 +179,23 @@ __mvp_atomic_decrement(mvp_atomic_t *valp)
/* The value for __val is in '__oldval' */
__val = __oldval;
#elif defined _MSC_VER
- __val = InterlockedDecrement(valp);
-#else
+ __val = InterlockedDecrement(valp);
+#elif defined __GNUC__
/*
* Don't know how to atomic decrement for a generic architecture
- * so punt and just decrement the value.
+ * so use GCC builtin
*/
-//#warning unknown architecture, atomic decrement is not...
+ __val = __sync_sub_and_fetch(valp,1);
+#else
+#if !defined(_MSC_VER)
+#warning unknown architecture, atomic decrement is not...
+#endif
__val = --(*valp);
#endif
return __val;
}
+#endif
+
#define mvp_atomic_inc __mvp_atomic_inc
static inline int mvp_atomic_inc(mvp_atomic_t *a) {
return __mvp_atomic_increment(a);
@@ -157,6 +216,11 @@ static inline void mvp_atomic_set(mvp_atomic_t *a, unsigned val) {
*a = val;
};
+#define mvp_atomic_val __mvp_atomic_val
+static inline int mvp_atomic_val(mvp_atomic_t *a) {
+ return *a;
+};
+
#ifdef __APPLE__
#pragma GCC optimization_level reset
#endif
@@ -24,12 +24,17 @@
#ifndef __REFMEM_H
#define __REFMEM_H
+#include "atomic.h"
+
/*
* -----------------------------------------------------------------
* Types
* -----------------------------------------------------------------
*/
+/* Return current number of references outstanding for everything */
+extern int ref_get_refcount();
+
/**
* Release a reference to allocated memory.
* \param p allocated memory
@@ -95,4 +100,31 @@ extern void ref_set_destroy(void *block, ref_destroy_t func);
*/
extern void ref_alloc_show(void);
+/*
+ * Debug level constants used to determine the level of debug tracing
+ * to be done and the debug level of any given message.
+ */
+
+#define REF_DBG_NONE -1
+#define REF_DBG_ERRORS 0
+#define REF_DBG_COUNTERS 1
+#define REF_DBG_DEBUG 2
+#define REF_DBG_ALL 3
+
+/**
+ * Set librefmem debug level.
+ * \param l debugging level (-1 for none, 3 for all)
+ */
+void refmem_dbg_level(int l);
+
+/**
+ * Enable all librefmem debugging.
+ */
+void refmem_dbg_all();
+
+/**
+ * Disable all librefmem debugging.
+ */
+void refmem_dbg_none();
+
#endif /* __REFMEM_H */
Oops, something went wrong.

0 comments on commit 09e173e

Please sign in to comment.