Reland "[mips][compiler-rt] Provide 64bit atomic add and sub"
r318733 introduced a build failure for xray on native MIPS32 systems due
to the lack of __sync_fetch_and_add / __sync_fetch_and_sub support. This patch
extends the existing atomics support so that xray builds successfully.
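
For context, a minimal reproducer of the failure mode (my own sketch, not
part of the commit): on O32 MIPS32 an 8-byte __sync builtin has no inline
lowering, so the compiler emits a call to an out-of-line helper such as
__sync_fetch_and_add_8, and the link fails unless a runtime provides it.

// repro.cpp -- hypothetical reproducer; build with something like
//   clang --target=mips-linux-gnu -O2 -c repro.cpp
#include <cstdint>

uint64_t counter = 0;

uint64_t bump(uint64_t n) {
  // MIPS32 has no 8-byte LL/SC sequence, so this becomes a libcall that
  // some runtime must define -- hence the xray build failure.
  return __sync_fetch_and_add(&counter, n);
}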

The initial patch was reverted in r321292, as I suspected it might have caused
a buildbot failure. In fact, the test failures were caused by another patch
among the updates the bot had fetched, and that patch has since been reverted.

Reviewers: atanasyan, dberris

Differential Revision: https://reviews.llvm.org/D40385

llvm-svn: 321383
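
A usage sketch (mine, not from the commit) of what the new specializations
enable, assuming the usual sanitizer entry point "sanitizer_atomic.h": an
xray-style 64-bit counter bump that previously required the missing u64
__sync_fetch_and_add, and now resolves to the spin-lock emulation added here.

#include "sanitizer_atomic.h"

namespace __sanitizer {

static atomic_uint64_t bytes_processed;

u64 RecordBytes(u64 n) {
  // Returns the previous value, matching __sync_fetch_and_add semantics.
  return atomic_fetch_add(&bytes_processed, n, memory_order_relaxed);
}

}  // namespace __sanitizer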
Simon Dardis committed Dec 22, 2017
1 parent ca741a8 commit 92365ca
Showing 3 changed files with 125 additions and 73 deletions.
17 changes: 7 additions & 10 deletions compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -78,17 +78,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typedef typename T::Type Type;
Type cmpv = *cmp;
Type prev;
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
if (sizeof(*a) == 8) {
Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
prev = __mips_sync_val_compare_and_swap<u64>(
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmpv, (u64)xchg);
} else {
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
}
#else
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
#endif
if (prev == cmpv) return true;
*cmp = prev;
return false;
@@ -104,6 +94,13 @@ INLINE bool atomic_compare_exchange_weak(volatile T *a,

} // namespace __sanitizer

// This include provides explicit specializations for atomic_uint64_t
// on MIPS32, which does not directly support 8-byte atomics. It has to
// follow the template definitions above.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
#include "sanitizer_atomic_clang_mips.h"
#endif

#undef ATOMIC_ORDER

#endif // SANITIZER_ATOMIC_CLANG_H
118 changes: 118 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
@@ -0,0 +1,118 @@
//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
#define SANITIZER_ATOMIC_CLANG_MIPS_H

namespace __sanitizer {

// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))
while (*lock) {
}
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cache line, which is 32 bytes.
static struct {
int lock;
char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0, {0}};
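// [Editorial aside, not part of the commit] The pad array rounds the struct
// up to exactly one 32-byte MIPS cache line, so the lock word cannot falsely
// share a line with neighbouring globals. The layout assumption could be
// checked at compile time with, e.g.:
//   static_assert(sizeof(lock) == 32, "lock must fill one cache line");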

template <>
INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));

atomic_uint64_t::Type ret;

__spin_lock(&lock.lock);
ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
ptr->val_dont_use = ret + val;
__spin_unlock(&lock.lock);

return ret;
}

template <>
INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
return atomic_fetch_add(ptr, -val, mo);
}
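// [Editorial aside, not part of the commit] Negating the unsigned val above
// is well-defined modular arithmetic, so fetch_add(ptr, -val) subtracts
// exactly: for val = 1, -val wraps to 0xFFFFFFFFFFFFFFFF, and adding that to
// prev yields prev - 1 (mod 2^64).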

template <>
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type *cmp,
atomic_uint64_t::Type xchg,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));

typedef atomic_uint64_t::Type Type;
Type cmpv = *cmp;
Type prev;
bool ret = false;

__spin_lock(&lock.lock);
prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
if (prev == cmpv) {
ret = true;
ptr->val_dont_use = xchg;
}
__spin_unlock(&lock.lock);

return ret;
}

template <>
INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume |
memory_order_acquire | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));

atomic_uint64_t::Type zero = 0;
volatile atomic_uint64_t *Newptr =
const_cast<volatile atomic_uint64_t *>(ptr);
return atomic_fetch_add(Newptr, zero, mo);
}

template <>
INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));

__spin_lock(&lock.lock);
ptr->val_dont_use = v;
__spin_unlock(&lock.lock);
}

} // namespace __sanitizer

#endif // SANITIZER_ATOMIC_CLANG_MIPS_H

63 changes: 0 additions & 63 deletions compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -17,55 +17,6 @@

namespace __sanitizer {

// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))
while (*lock) {
}
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }


// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
int lock;
char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0};

template <class T>
T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
T ret;

__spin_lock(&lock.lock);

ret = *ptr;
*ptr = ret + val;

__spin_unlock(&lock.lock);

return ret;
}

template <class T>
T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
T ret;
__spin_lock(&lock.lock);

ret = *ptr;
if (ret == oldval) *ptr = newval;

__spin_unlock(&lock.lock);

return ret;
}
#endif

INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
@@ -103,15 +54,8 @@ INLINE typename T::Type atomic_load(
// 64-bit load on 32-bit platform.
// Gross, but simple and reliable.
// Assume that it is not in read-only memory.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
typename T::Type volatile *val_ptr =
const_cast<typename T::Type volatile *>(&a->val_dont_use);
v = __mips_sync_fetch_and_add<u64>(
reinterpret_cast<u64 volatile *>(val_ptr), 0);
#else
v = __sync_fetch_and_add(
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
#endif
}
return v;
}
@@ -141,14 +85,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
typename T::Type cmp = a->val_dont_use;
typename T::Type cur;
for (;;) {
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
typename T::Type volatile *val_ptr =
const_cast<typename T::Type volatile *>(&a->val_dont_use);
cur = __mips_sync_val_compare_and_swap<u64>(
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
#else
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
#endif
if (cmp == cur)
break;
cmp = cur;