[clang][compiler-rt][atomics] Add __c11_atomic_fetch_nand builtin and support `__atomic_fetch_nand` libcall

Add `__c11_atomic_fetch_nand` builtin to language extensions and support `__atomic_fetch_nand` libcall in compiler-rt.

Reviewed By: theraven

Differential Revision: https://reviews.llvm.org/D112400
bzEq committed Oct 28, 2021
1 parent 6cf6fa6 commit 6ea2431
Showing 9 changed files with 71 additions and 0 deletions.
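
For orientation, a minimal usage sketch (illustrative only, not part of this commit; requires a Clang with this patch). Like the other `__c11_atomic_fetch_*` builtins, it atomically replaces the object with ~(old & val) and returns the old value:

#include <stdatomic.h>
#include <stdio.h>

int main(void) {
  _Atomic(unsigned) atom = 0xF0u;
  // Atomically: old = atom; atom = ~(old & 0xFFu); yields old.
  unsigned old = __c11_atomic_fetch_nand(&atom, 0xFFu, __ATOMIC_SEQ_CST);
  printf("old=%#x new=%#x\n", old, atomic_load(&atom)); // old=0xf0 new=0xffffff0f
  return 0;
}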
1 change: 1 addition & 0 deletions clang/docs/LanguageExtensions.rst
@@ -2866,6 +2866,7 @@ the corresponding C11 operations, are:
* ``__c11_atomic_fetch_and``
* ``__c11_atomic_fetch_or``
* ``__c11_atomic_fetch_xor``
* ``__c11_atomic_fetch_nand`` (Nand is not present in ``<stdatomic.h>``)
* ``__c11_atomic_fetch_max``
* ``__c11_atomic_fetch_min``
1 change: 1 addition & 0 deletions clang/include/clang/Basic/Builtins.def
@@ -796,6 +796,7 @@ ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_nand, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_max, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_min, "v.", "t")
BUILTIN(__c11_atomic_thread_fence, "vi", "n")
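
(As with the neighboring entries, the "v." signature here is a placeholder and the "t" attribute marks the builtin for custom type checking, so the actual argument and result types are derived in Sema from the _Atomic pointer operand.)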
1 change: 1 addition & 0 deletions clang/lib/AST/Expr.cpp
@@ -4695,6 +4695,7 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__c11_atomic_fetch_and:
case AO__c11_atomic_fetch_or:
case AO__c11_atomic_fetch_xor:
case AO__c11_atomic_fetch_nand:
case AO__c11_atomic_fetch_max:
case AO__c11_atomic_fetch_min:
case AO__atomic_fetch_add:
4 changes: 4 additions & 0 deletions clang/lib/CodeGen/CGAtomic.cpp
@@ -664,6 +664,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
Op = llvm::AtomicRMWInst::Nand;
break;
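
On the lock-free path this maps both the C11 and GNU-flavored builtins onto LLVM's atomicrmw nand instruction; for a 4-byte object the emitted IR looks roughly like (value names illustrative):

  %old = atomicrmw nand i32* %ptr, i32 %val seq_cst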
@@ -906,6 +907,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_and:
@@ -972,6 +974,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
@@ -1211,6 +1214,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
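
When the size has no lock-free support, the builtin is instead lowered to the `__atomic_fetch_nand` libcall named here; on the optimized-libcall path a size suffix is appended (e.g. `__atomic_fetch_nand_4`), matching the helpers the compiler-rt change below introduces.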
1 change: 1 addition & 0 deletions clang/lib/Sema/SemaChecking.cpp
@@ -5287,6 +5287,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
9 changes: 9 additions & 0 deletions clang/test/Sema/atomic-implicit-seq_cst.c
@@ -178,6 +178,14 @@ int bad_bitor_2(int i) {
return i | atom; // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
}

int bad_bitnand_1(int i) {
return ~(atom & i); // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
}

int bad_bitnand_2(int i) {
return ~(i & atom); // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
}

int bad_and_1(int i) {
return atom && i; // expected-warning {{implicit use of sequentially-consistent atomic may incur stronger memory barriers than necessary}}
}
@@ -315,6 +323,7 @@ int good_c11_atomic_fetch_sub(int i) { return __c11_atomic_fetch_sub(&atom, i, __ATOMIC_RELAXED); }
int good_c11_atomic_fetch_and(int i) { return __c11_atomic_fetch_and(&atom, i, __ATOMIC_RELAXED); }
int good_c11_atomic_fetch_or(int i) { return __c11_atomic_fetch_or(&atom, i, __ATOMIC_RELAXED); }
int good_c11_atomic_fetch_xor(int i) { return __c11_atomic_fetch_xor(&atom, i, __ATOMIC_RELAXED); }
int good_c11_atomic_fetch_nand(int i) { return __c11_atomic_fetch_nand(&atom, i, __ATOMIC_RELAXED); }

void good_cast_to_void(void) { (void)atom; }
_Atomic(int) * good_address_of(void) { return &atom; }
11 changes: 11 additions & 0 deletions clang/test/Sema/atomic-ops.c
@@ -362,6 +362,13 @@ void memory_checks(_Atomic(int) *Ap, int *p, int val) {
(void)__c11_atomic_fetch_xor(Ap, val, memory_order_acq_rel);
(void)__c11_atomic_fetch_xor(Ap, val, memory_order_seq_cst);

(void)__c11_atomic_fetch_nand(Ap, val, memory_order_relaxed);
(void)__c11_atomic_fetch_nand(Ap, val, memory_order_acquire);
(void)__c11_atomic_fetch_nand(Ap, val, memory_order_consume);
(void)__c11_atomic_fetch_nand(Ap, val, memory_order_release);
(void)__c11_atomic_fetch_nand(Ap, val, memory_order_acq_rel);
(void)__c11_atomic_fetch_nand(Ap, val, memory_order_seq_cst);

(void)__c11_atomic_fetch_min(Ap, val, memory_order_relaxed);
(void)__c11_atomic_fetch_min(Ap, val, memory_order_acquire);
(void)__c11_atomic_fetch_min(Ap, val, memory_order_consume);
@@ -602,6 +609,8 @@ void nullPointerWarning() {
(void)__c11_atomic_fetch_or((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
(void)__c11_atomic_fetch_xor((volatile _Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
(void)__c11_atomic_fetch_xor((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
(void)__c11_atomic_fetch_nand((volatile _Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
(void)__c11_atomic_fetch_nand((_Atomic(int)*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}

__atomic_store_n((volatile int*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
__atomic_store_n((int*)0, 42, memory_order_relaxed); // expected-warning {{null passed to a callee that requires a non-null argument}}
@@ -680,6 +689,8 @@ void nullPointerWarning() {
(void)__c11_atomic_fetch_or(&ai, 0, memory_order_relaxed);
(void)__c11_atomic_fetch_xor(&vai, 0, memory_order_relaxed);
(void)__c11_atomic_fetch_xor(&ai, 0, memory_order_relaxed);
(void)__c11_atomic_fetch_nand(&vai, 0, memory_order_relaxed);
(void)__c11_atomic_fetch_nand(&ai, 0, memory_order_relaxed);

// Ditto.
__atomic_store_n(&vi, 0, memory_order_relaxed);
15 changes: 15 additions & 0 deletions compiler-rt/lib/builtins/atomic.c
@@ -336,6 +336,18 @@ OPTIMISED_CASES
return tmp; \
}

#define ATOMIC_RMW_NAND(n, lockfree, type) \
type __atomic_fetch_nand_##n(type *ptr, type val, int model) { \
if (lockfree(ptr)) \
return __c11_atomic_fetch_nand((_Atomic(type) *)ptr, val, model); \
Lock *l = lock_for_pointer(ptr); \
lock(l); \
type tmp = *ptr; \
*ptr = ~(tmp & val); \
unlock(l); \
return tmp; \
}
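
For reference, for n == 4 — assuming OPTIMISED_CASES supplies the tuple (4, IS_LOCK_FREE_4, uint32_t) as for the other RMW helpers in this file, and treating IS_LOCK_FREE_4 as that table's lock-free predicate — the macro expands to roughly:

uint32_t __atomic_fetch_nand_4(uint32_t *ptr, uint32_t val, int model) {
  if (IS_LOCK_FREE_4(ptr))
    return __c11_atomic_fetch_nand((_Atomic(uint32_t) *)ptr, val, model);
  Lock *l = lock_for_pointer(ptr); // not lock-free: take the address-hashed lock
  lock(l);
  uint32_t tmp = *ptr;  // old value to return
  *ptr = ~(tmp & val);  // NAND: store ~(old & val)
  unlock(l);
  return tmp;
}

The locked fallback is a plain read-modify-write under the lock, so callers observe the same fetch semantics as the lock-free path.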

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
@@ -351,3 +363,6 @@ OPTIMISED_CASES
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW_NAND(n, lockfree, type)
OPTIMISED_CASES
#undef OPTIMISED_CASE
28 changes: 28 additions & 0 deletions compiler-rt/test/builtins/Unit/atomic_test.c
@@ -96,6 +96,11 @@ uint16_t __atomic_fetch_xor_2(uint16_t *ptr, uint16_t val, int model);
uint32_t __atomic_fetch_xor_4(uint32_t *ptr, uint32_t val, int model);
uint64_t __atomic_fetch_xor_8(uint64_t *ptr, uint64_t val, int model);

uint8_t __atomic_fetch_nand_1(uint8_t *ptr, uint8_t val, int model);
uint16_t __atomic_fetch_nand_2(uint16_t *ptr, uint16_t val, int model);
uint32_t __atomic_fetch_nand_4(uint32_t *ptr, uint32_t val, int model);
uint64_t __atomic_fetch_nand_8(uint64_t *ptr, uint64_t val, int model);

// We conditionally test the *_16 atomic function variants based on the same
// condition that compiler_rt (atomic.c) uses to conditionally generate them.
// Currently atomic.c tests if __SIZEOF_INT128__ is defined (which can be the
@@ -119,6 +124,7 @@ uint128_t __atomic_fetch_sub_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_and_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_or_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_xor_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_nand_16(uint128_t *ptr, uint128_t val, int model);
#else
typedef uint64_t maxuint_t;
#endif
@@ -540,6 +546,28 @@ void test_fetch_op(void) {
abort();
#endif

// Fetch nand.

set_a_values(V + m);
set_b_values(0);
b8 = __atomic_fetch_nand_1(&a8, U8(ONES), model);
if (b8 != U8(V + m) || a8 != U8(~((V + m) & ONES)))
abort();
b16 = __atomic_fetch_nand_2(&a16, U16(ONES), model);
if (b16 != U16(V + m) || a16 != U16(~((V + m) & ONES)))
abort();
b32 = __atomic_fetch_nand_4(&a32, U32(ONES), model);
if (b32 != U32(V + m) || a32 != U32(~((V + m) & ONES)))
abort();
b64 = __atomic_fetch_nand_8(&a64, U64(ONES), model);
if (b64 != U64(V + m) || a64 != U64(~((V + m) & ONES)))
abort();
#ifdef TEST_16
b128 = __atomic_fetch_nand_16(&a128, ONES, model);
if (b128 != (V + m) || a128 != ~((V + m) & ONES))
abort();
#endif

// Check signed integer overflow behavior

set_a_values(V + m);