[compiler-rt] Implement __atomic_is_lock_free
This function is called by the __atomic_is_lock_free() builtin when the value
cannot be resolved to true at compile time. The lack of this function causes
the non-lock-free atomics tests in libc++ to be skipped (see D91911).

This function is also added in D85044, but that review additionally adds
support for using lock-free atomics in more cases, whereas this is a minimal
change that only adds __atomic_is_lock_free() to the implementation in atomic.c.
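
For illustration only (not part of the patch): a hedged sketch of when the libcall is emitted. If the size argument is not a compile-time constant, the compiler cannot fold the builtin to a constant and instead emits a call to the runtime function added here. The function name query_lock_freedom below is hypothetical.

#include <stdbool.h>
#include <stddef.h>

// `size` is a runtime value, so __atomic_is_lock_free() cannot be folded to a
// constant; the compiler lowers it to a call to the library function
// __atomic_is_lock_free(size, ptr) that this patch implements in atomic.c.
bool query_lock_freedom(size_t size, void *obj) {
  return __atomic_is_lock_free(size, obj);
}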

Reviewed By: ldionne

Differential Revision: https://reviews.llvm.org/D92302
arichardson committed Jan 8, 2021
1 parent 99c84af commit 00530de
Showing 2 changed files with 90 additions and 0 deletions.
10 changes: 10 additions & 0 deletions compiler-rt/lib/builtins/atomic.c
@@ -36,6 +36,8 @@
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \
    __atomic_compare_exchange)
#pragma redefine_extname __atomic_is_lock_free_c SYMBOL_NAME( \
    __atomic_is_lock_free)

/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade between
@@ -157,6 +159,14 @@ static __inline Lock *lock_for_pointer(void *ptr) {
} \
} while (0)

/// Whether atomic operations for the given size (and alignment) are lock-free.
bool __atomic_is_lock_free_c(size_t size, void *ptr) {
#define LOCK_FREE_ACTION(type) return true;
  LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
  return false;
}

/// An atomic load operation. This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
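
The LOCK_FREE_CASES(ptr) macro used by the new function is defined earlier in atomic.c and does not appear in this diff; it dispatches on the requested size and the pointer's alignment. Purely as a hedged sketch (not the actual compiler-rt macro, and is_lock_free_sketch is a hypothetical name), the equivalent logic looks roughly like this:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical stand-in for the dispatch performed by LOCK_FREE_CASES(ptr):
// a size is reported lock-free only when the target supports lock-free
// atomics of that width and the pointer is naturally aligned for it.
static bool is_lock_free_sketch(size_t size, void *ptr) {
  uintptr_t addr = (uintptr_t)ptr;
  switch (size) {
  case 1:
    return __atomic_always_lock_free(1, 0);
  case 2:
    return __atomic_always_lock_free(2, 0) && (addr & 1) == 0;
  case 4:
    return __atomic_always_lock_free(4, 0) && (addr & 3) == 0;
  case 8:
    return __atomic_always_lock_free(8, 0) && (addr & 7) == 0;
  case 16:
    return __atomic_always_lock_free(16, 0) && (addr & 15) == 0;
  default:
    // Non-power-of-two sizes and sizes above 16 bytes always take the
    // lock-based path, so they are never lock-free.
    return false;
  }
}

With a dispatch like this, __atomic_is_lock_free_c returns true as soon as a lock-free case is hit and false otherwise, which is what the unit test below exercises.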
80 changes: 80 additions & 0 deletions compiler-rt/test/builtins/Unit/atomic_test.c
@@ -19,6 +19,8 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#undef NDEBUG
#include <assert.h>

// We directly test the library atomic functions, not using the C builtins. This
// should avoid confounding factors, ensuring that we actually test the
@@ -29,6 +31,9 @@
#define STRINGIFY(x) _STRINGIFY(x)
#define EXTERNAL_NAME(name) asm(STRINGIFY(__USER_LABEL_PREFIX__) #name)

bool __atomic_is_lock_free_c(size_t size, void *ptr)
    EXTERNAL_NAME(__atomic_is_lock_free);

void __atomic_load_c(int size, void *src, void *dest,
                     int model) EXTERNAL_NAME(__atomic_load);

@@ -573,11 +578,86 @@ void test_fetch_op(void) {
}
}

void test_is_lock_free(void) {
  // The result of __atomic_is_lock_free is architecture-dependent, so we only
  // check for a true return value for the sizes where we know at compile time
  // that they are supported. If __atomic_always_lock_free() returns false for
  // a given size, we can only check that __atomic_is_lock_free() returns false
  // for unaligned values.
  // Note: This assumption will have to be revisited when we support an
  // architecture that allows for unaligned atomics.
  // XXX: Do any architectures report true for unaligned atomics?

  // All atomic.c implementations fall back to the non-specialized case for
  // size=0, so despite the operation being a no-op, they still take locks and
  // therefore __atomic_is_lock_free should return false.
  assert(!__atomic_is_lock_free_c(0, NULL) && "size zero should never be lock-free");
  assert(!__atomic_is_lock_free_c(0, (void *)8) && "size zero should never be lock-free");

  if (__atomic_always_lock_free(1, 0)) {
    assert(__atomic_is_lock_free_c(1, NULL) && "aligned size=1 should always be lock-free");
    assert(__atomic_is_lock_free_c(1, (void *)1) && "aligned size=1 should always be lock-free");
  }

  if (__atomic_always_lock_free(2, 0)) {
    assert(__atomic_is_lock_free_c(2, NULL) && "aligned size=2 should always be lock-free");
    assert(__atomic_is_lock_free_c(2, (void *)2) && "aligned size=2 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(2, (void *)1) && "unaligned size=2 should not be lock-free");

  if (__atomic_always_lock_free(4, 0)) {
    assert(__atomic_is_lock_free_c(4, NULL) && "aligned size=4 should always be lock-free");
    assert(__atomic_is_lock_free_c(4, (void *)4) && "aligned size=4 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(4, (void *)3) && "unaligned size=4 should not be lock-free");
  assert(!__atomic_is_lock_free_c(4, (void *)2) && "unaligned size=4 should not be lock-free");
  assert(!__atomic_is_lock_free_c(4, (void *)1) && "unaligned size=4 should not be lock-free");

  if (__atomic_always_lock_free(8, 0)) {
    assert(__atomic_is_lock_free_c(8, NULL) && "aligned size=8 should always be lock-free");
    assert(__atomic_is_lock_free_c(8, (void *)8) && "aligned size=8 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(8, (void *)7) && "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)4) && "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)2) && "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)1) && "unaligned size=8 should not be lock-free");

  if (__atomic_always_lock_free(16, 0)) {
    assert(__atomic_is_lock_free_c(16, NULL) && "aligned size=16 should always be lock-free");
    assert(__atomic_is_lock_free_c(16, (void *)16) && "aligned size=16 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(16, (void *)15) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)8) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)4) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)2) && "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)1) && "unaligned size=16 should not be lock-free");

  // In the current implementation > 16 bytes are never lock-free:
  assert(!__atomic_is_lock_free_c(32, NULL) && "aligned size=32 should not be lock-free");
  assert(!__atomic_is_lock_free_c(32, (void *)32) && "aligned size=32 should not be lock-free");
  assert(!__atomic_is_lock_free_c(32, (void *)31) && "unaligned size=32 should not be lock-free");

  // We also don't support non-power-of-two sizes:
  assert(!__atomic_is_lock_free_c(3, NULL) && "aligned size=3 should not be lock-free");
  assert(!__atomic_is_lock_free_c(5, NULL) && "aligned size=5 should not be lock-free");
  assert(!__atomic_is_lock_free_c(6, NULL) && "aligned size=6 should not be lock-free");
  assert(!__atomic_is_lock_free_c(7, NULL) && "aligned size=7 should not be lock-free");
  assert(!__atomic_is_lock_free_c(9, NULL) && "aligned size=9 should not be lock-free");
  assert(!__atomic_is_lock_free_c(10, NULL) && "aligned size=10 should not be lock-free");
  assert(!__atomic_is_lock_free_c(11, NULL) && "aligned size=11 should not be lock-free");
  assert(!__atomic_is_lock_free_c(12, NULL) && "aligned size=12 should not be lock-free");
  assert(!__atomic_is_lock_free_c(13, NULL) && "aligned size=13 should not be lock-free");
  assert(!__atomic_is_lock_free_c(14, NULL) && "aligned size=14 should not be lock-free");
  assert(!__atomic_is_lock_free_c(15, NULL) && "aligned size=15 should not be lock-free");
  assert(!__atomic_is_lock_free_c(17, NULL) && "aligned size=17 should not be lock-free");
}

int main() {
  test_loads();
  test_stores();
  test_exchanges();
  test_compare_exchanges();
  test_fetch_op();
  test_is_lock_free();
  return 0;
}
