This repository has been archived by the owner on Nov 15, 2022. It is now read-only.

Commit

Simplify the quasi-atomic stuff and add MIPS support.
Change-Id: Iac896c03ef707c998cc714628adea0255f21c035
enh-google committed May 2, 2012
1 parent 5a1fcc8 commit 1c92af6
Showing 3 changed files with 108 additions and 177 deletions.
279 changes: 102 additions & 177 deletions vm/Atomic.cpp
@@ -18,74 +18,40 @@

#include <cutils/atomic.h>

/*
* Quasi-atomic 64-bit operations, for platforms that lack the real thing.
*
* TODO: unify ARMv6/x86/sh implementations using the to-be-written
* spin lock implementation. We don't want to rely on mutex innards,
* and it would be great if all platforms were running the same code.
*/
#if defined(__arm__)
#include <machine/cpu-features.h>
#endif

#if defined(HAVE_MACOSX_IPC)
/*****************************************************************************/

#include <libkern/OSAtomic.h>
#if defined(HAVE_MACOSX_IPC)
#define NEED_MAC_QUASI_ATOMICS 1

#if defined(__ppc__) \
|| defined(__PPC__) \
|| defined(__powerpc__) \
|| defined(__powerpc) \
|| defined(__POWERPC__) \
|| defined(_M_PPC) \
|| defined(__PPC)
#define NEED_QUASIATOMICS 1
#else
#elif defined(__i386__) || defined(__x86_64__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
volatile int64_t* addr)
{
return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
(int64_t*)addr) == 0;
}
#elif defined(__mips__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__arm__)

static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
volatile int64_t* addr)
{
int64_t oldValue;
do {
oldValue = *addr;
} while (dvmQuasiAtomicCas64(oldValue, value, addr));
return oldValue;
}
#if defined(__ARM_HAVE_LDREXD)
#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
#else
#define NEED_PTHREADS_QUASI_ATOMICS 1
#endif /*__ARM_HAVE_LDREXD*/

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
return dvmQuasiAtomicSwap64Body(value, addr);
}
#elif defined(__sh__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
int64_t oldValue;
ANDROID_MEMBAR_STORE();
oldValue = dvmQuasiAtomicSwap64Body(value, addr);
/* TUNING: barriers can be avoided on some architectures */
ANDROID_MEMBAR_FULL();
return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
return OSAtomicAdd64Barrier(0, addr);
}
#else
#error "Unsupported atomic operations for this platform"
#endif

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_QUASIATOMICS 1
/*****************************************************************************/

#elif __arm__
#include <machine/cpu-features.h>
#if NEED_ARM_LDREXD_QUASI_ATOMICS

#ifdef __ARM_HAVE_LDREXD
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
volatile int64_t* addr)
{
@@ -144,37 +110,93 @@ int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
: "r" (addr));
return value;
}
#endif

#else
/*****************************************************************************/

#if NEED_MAC_QUASI_ATOMICS

#include <libkern/OSAtomic.h>

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
volatile int64_t* addr)
{
return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
(int64_t*)addr) == 0;
}


static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
volatile int64_t* addr)
{
int64_t oldValue;
do {
oldValue = *addr;
} while (dvmQuasiAtomicCas64(oldValue, value, addr));
return oldValue;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
return dvmQuasiAtomicSwap64Body(value, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
int64_t oldValue;
ANDROID_MEMBAR_STORE();
oldValue = dvmQuasiAtomicSwap64Body(value, addr);
/* TUNING: barriers can be avoided on some architectures */
ANDROID_MEMBAR_FULL();
return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
return OSAtomicAdd64Barrier(0, addr);
}
#endif

/*****************************************************************************/

#if NEED_PTHREADS_QUASI_ATOMICS

// In the absence of a better implementation, we implement the 64-bit atomic
// operations through mutex locking.

// on the device, we implement the 64-bit atomic operations through
// mutex locking. normally, this is bad because we must initialize
// a pthread_mutex_t before being able to use it, and this means
// having to do an initialization check on each function call, and
// that's where really ugly things begin...
//
// BUT, as a special twist, we take advantage of the fact that in our
// pthread library, a mutex is simply a volatile word whose value is always
// initialized to 0. In other words, simply declaring a static mutex
// object initializes it !
//
// another twist is that we use a small array of mutexes to dispatch
// the contention locks from different memory addresses
//

#include <pthread.h>

#define SWAP_LOCK_COUNT 32U
static pthread_mutex_t _swap_locks[SWAP_LOCK_COUNT];
static const size_t kSwapLockCount = 32;
static pthread_mutex_t* gSwapLocks[kSwapLockCount];

void dvmQuasiAtomicsStartup() {
for (size_t i = 0; i < kSwapLockCount; ++i) {
pthread_mutex_t* m = new pthread_mutex_t;
dvmInitMutex(m);
gSwapLocks[i] = m;
}
}

#define SWAP_LOCK(addr) \
&_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
void dvmQuasiAtomicsShutdown() {
for (size_t i = 0; i < kSwapLockCount; ++i) {
pthread_mutex_t* m = gSwapLocks[i];
gSwapLocks[i] = NULL;
dvmDestroyMutex(m);
delete m;
}
}

static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
return gSwapLocks[((unsigned)(void*)(addr) >> 3U) % kSwapLockCount];
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
int64_t oldValue;
pthread_mutex_t* lock = SWAP_LOCK(addr);
pthread_mutex_t* lock = GetSwapLock(addr);

pthread_mutex_lock(lock);

@@ -195,7 +217,7 @@ int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
volatile int64_t* addr)
{
int result;
pthread_mutex_t* lock = SWAP_LOCK(addr);
pthread_mutex_t* lock = GetSwapLock(addr);

pthread_mutex_lock(lock);

@@ -212,115 +234,18 @@ int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
int64_t result;
pthread_mutex_t* lock = SWAP_LOCK(addr);
pthread_mutex_t* lock = GetSwapLock(addr);

pthread_mutex_lock(lock);
result = *addr;
pthread_mutex_unlock(lock);
return result;
}

#endif /*__ARM_HAVE_LDREXD*/

/*****************************************************************************/
#elif __sh__
#define NEED_QUASIATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif


#if NEED_QUASIATOMICS

/* Note that a spinlock is *not* a good idea in general
* since they can introduce subtle issues. For example,
* a real-time thread trying to acquire a spinlock already
* acquired by another thread will never yield, making the
* CPU loop endlessly!
*
* However, this code is only used on the Linux simulator
* so it's probably ok for us.
*
* The alternative is to use a pthread mutex, but
* these must be initialized before being used, and
* then you have the problem of lazily initializing
* a mutex without any other synchronization primitive.
*
* TODO: these currently use sched_yield(), which is not guaranteed to
* do anything at all. We need to use dvmIterativeSleep or a wait /
* notify mechanism if the initial attempt fails.
*/

/* global spinlock for all 64-bit quasiatomic operations */
static int32_t quasiatomic_spinlock = 0;

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
volatile int64_t* addr)
{
int result;

while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
Sleep(0);
#else
sched_yield();
#endif
}

if (*addr == oldvalue) {
*addr = newvalue;
result = 0;
} else {
result = 1;
}

android_atomic_release_store(0, &quasiatomic_spinlock);

return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
int64_t result;

while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
Sleep(0);
#else
sched_yield();
#endif
}

result = *addr;
android_atomic_release_store(0, &quasiatomic_spinlock);

return result;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
int64_t result;

while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
Sleep(0);
#else
sched_yield();
#endif
}

result = *addr;
*addr = value;
android_atomic_release_store(0, &quasiatomic_spinlock);

return result;
}

/* Same as dvmQuasiAtomicSwap64 - syscall handles barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
return dvmQuasiAtomicSwap64(value, addr);
}
// The other implementations don't need any special setup.
void dvmQuasiAtomicsStartup() {}
void dvmQuasiAtomicsShutdown() {}

#endif /*NEED_QUASIATOMICS*/
#endif /*NEED_PTHREADS_QUASI_ATOMICS*/
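For readers skimming the diff: the heart of the new NEED_PTHREADS_QUASI_ATOMICS fallback is per-address lock sharding, where each 64-bit address is hashed into a small fixed pool of mutexes so operations on unrelated addresses rarely contend, while two operations on the same address always take the same lock. The standalone sketch below is illustrative only and not part of the commit; the names (kLockCount, LockFor, QuasiCas64) and the main() driver are made up for the example, and the mutexes are statically allocated for simplicity rather than heap-allocated as in the patch.

// Illustrative sketch of the mutex-sharded quasi-atomic pattern (not part of the diff).
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static const size_t kLockCount = 32;
static pthread_mutex_t gLocks[kLockCount];   // statically allocated for simplicity

// Hash the address into the pool. 64-bit fields are 8-byte aligned, so the
// low three bits carry no information and are shifted away first.
static pthread_mutex_t* LockFor(const volatile int64_t* addr) {
    return &gLocks[((uintptr_t) addr >> 3) % kLockCount];
}

// Emulated 64-bit CAS: returns 0 on success, non-zero on failure, matching
// the dvmQuasiAtomicCas64 convention used in the patch.
static int QuasiCas64(int64_t oldvalue, int64_t newvalue, volatile int64_t* addr) {
    pthread_mutex_t* lock = LockFor(addr);
    int result;
    pthread_mutex_lock(lock);
    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int main() {
    for (size_t i = 0; i < kLockCount; ++i) {
        pthread_mutex_init(&gLocks[i], NULL);  // analogous to dvmQuasiAtomicsStartup()
    }
    volatile int64_t value = 1;
    printf("cas 1->2: %s\n", QuasiCas64(1, 2, &value) == 0 ? "ok" : "failed");
    printf("cas 1->3: %s\n", QuasiCas64(1, 3, &value) == 0 ? "ok" : "failed");  // fails: value is now 2
    return 0;
}

Sharding by address keeps contention local: threads touching different 64-bit fields usually hit different mutexes, while all accesses to the same field are serialized by one mutex, which is exactly the guarantee documented in Atomic.h.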
3 changes: 3 additions & 0 deletions vm/Atomic.h
@@ -23,6 +23,9 @@
#include <cutils/atomic.h> /* use common Android atomic ops */
#include <cutils/atomic-inline.h> /* and some uncommon ones */

void dvmQuasiAtomicsStartup();
void dvmQuasiAtomicsShutdown();

/*
* NOTE: Two "quasiatomic" operations on the exact same memory address
* are guaranteed to operate atomically with respect to each other,
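The NOTE above captures the contract: quasi-atomic operations are only atomic with respect to other quasi-atomic operations on the same address. As a hedged sketch of the typical caller pattern, assuming the declarations in this header (the helper name AddToCounter64 is hypothetical and not part of the API):

// Hypothetical caller: build a 64-bit atomic add out of the quasi-atomic
// read + CAS primitives. Safe only if every access to *counter goes through
// the dvmQuasiAtomic* functions.
static void AddToCounter64(volatile int64_t* counter, int64_t delta) {
    int64_t oldValue;
    do {
        oldValue = dvmQuasiAtomicRead64(counter);
        // dvmQuasiAtomicCas64 returns 0 on success; a non-zero result means
        // another thread changed *counter since the read, so reload and retry.
    } while (dvmQuasiAtomicCas64(oldValue, oldValue + delta, counter) != 0);
}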
3 changes: 3 additions & 0 deletions vm/Init.cpp
@@ -1231,6 +1231,7 @@ std::string dvmStartup(int argc, const char* const argv[],
/*
* Initialize components.
*/
dvmQuasiAtomicsStartup();
if (!dvmAllocTrackerStartup()) {
return "dvmAllocTrackerStartup failed";
}
@@ -1719,6 +1720,8 @@ void dvmShutdown()

freeAssertionCtrl();

dvmQuasiAtomicsShutdown();

/*
* We want valgrind to report anything we forget to free as "definitely
* lost". If there's a pointer in the global chunk, it would be reported
