Use correct way to test for MIPS arch after rOMP355687
Summary:
I ran into some issues after rOMP355687, where __atomic_fetch_add was
being used incorrectly on x86. This turned out to be caused by the
newly added conditionals of the form:

```
#if defined(KMP_ARCH_MIPS)
```

The problem is that these macros are always defined, and are set to
either 0 or 1 depending on the architecture. The correct way to test
for MIPS is therefore:

```
#if KMP_ARCH_MIPS
```
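
For illustration, a minimal standalone sketch (not part of the commit) of why the two tests behave differently, assuming hypothetical KMP_ARCH_* values and relying only on the fact stated above that these macros are always defined to 0 or 1:

```
/* Hypothetical values for illustration only: in the OpenMP runtime these
   macros are always defined, to 0 or 1, depending on the target. */
#define KMP_ARCH_X86 1
#define KMP_ARCH_MIPS 0

#if defined(KMP_ARCH_MIPS)
/* Always compiled, even on x86, because the macro is defined (to 0),
   so the MIPS-only __atomic_* code paths were selected everywhere. */
#endif

#if KMP_ARCH_MIPS
/* Only compiled when the macro expands to a non-zero value,
   i.e. when actually targeting MIPS. */
#endif
```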

Reviewers: petarj, jlpeyton, Hahnfeld, AndreyChurbanov

Reviewed By: petarj, AndreyChurbanov

Subscribers: AndreyChurbanov, sdardis, arichardson, atanasyan, jfb, jdoerfert, openmp-commits, llvm-commits

Tags: #openmp

Differential Revision: https://reviews.llvm.org/D60938

llvm-svn: 358911
DimitryAndric committed Apr 22, 2019
1 parent 2b74466 commit 87e7f89
Showing 1 changed file with 5 additions and 5 deletions.
10 changes: 5 additions & 5 deletions openmp/runtime/src/kmp_os.h
@@ -533,7 +533,7 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
__sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
__sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
-#if defined(KMP_ARCH_MIPS)
+#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p) \
__atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p) \
@@ -548,7 +548,7 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
__sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
__sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
-#if defined(KMP_ARCH_MIPS)
+#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p) \
__atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
@@ -575,7 +575,7 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
__sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v) \
__sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
-#if defined(KMP_ARCH_MIPS)
+#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v) \
__atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
__ATOMIC_SEQ_CST)
@@ -592,7 +592,7 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
__sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v) \
__sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
-#if defined(KMP_ARCH_MIPS)
+#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v) \
__atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
__ATOMIC_SEQ_CST)
@@ -637,7 +637,7 @@ extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
__sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
(kmp_uint32)(sv))
-#if defined(KMP_ARCH_MIPS)
+#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(
volatile kmp_uint64 *p, kmp_uint64 cv, kmp_uint64 sv) {
return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
