diff --git a/libc/src/__support/CMakeLists.txt b/libc/src/__support/CMakeLists.txt
index 2196d9e23bba7..c9d89cf6fc286 100644
--- a/libc/src/__support/CMakeLists.txt
+++ b/libc/src/__support/CMakeLists.txt
@@ -398,6 +398,30 @@ add_header_library(
     libc.src.__support.macros.attributes
 )
 
+add_header_library(
+  aba_ptr
+  HDRS
+    aba_ptr.h
+  DEPENDS
+    libc.hdr.types.size_t
+    libc.src.__support.common
+    libc.src.__support.threads.sleep
+)
+
+add_header_library(
+  mpmc_stack
+  HDRS
+    mpmc_stack.h
+  DEPENDS
+    libc.src.__support.aba_ptr
+    libc.src.__support.common
+    libc.src.__support.CPP.atomic
+    libc.src.__support.CPP.new
+    libc.src.__support.CPP.optional
+    libc.src.__support.CPP.type_traits
+)
+
 add_subdirectory(FPUtil)
 add_subdirectory(OSUtil)
 add_subdirectory(StringUtil)
diff --git a/libc/src/__support/aba_ptr.h b/libc/src/__support/aba_ptr.h
new file mode 100644
index 0000000000000..3e55aea8875a3
--- /dev/null
+++ b/libc/src/__support/aba_ptr.h
@@ -0,0 +1,83 @@
+//===-- Transactional Ptr for ABA prevention -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_ABA_PTR_H
+#define LLVM_LIBC_SRC___SUPPORT_ABA_PTR_H
+
+#include "hdr/types/size_t.h"
+#include "src/__support/common.h"
+#include "src/__support/threads/sleep.h"
+
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+#define LIBC_ABA_PTR_IS_ATOMIC true
+#else
+#define LIBC_ABA_PTR_IS_ATOMIC false
+#endif
+
+namespace LIBC_NAMESPACE_DECL {
+
+template <class T, bool IsAtomic> struct AbaPtrImpl {
+  union Impl {
+    struct alignas(2 * alignof(void *)) Atomic {
+      T *ptr;
+      size_t tag;
+    } atomic;
+    struct Mutex {
+      T *ptr;
+      bool locked;
+    } mutex;
+  } impl;
+
+  LIBC_INLINE constexpr AbaPtrImpl(T *ptr)
+      : impl(IsAtomic ? Impl{.atomic{ptr, 0}} : Impl{.mutex{ptr, false}}) {}
+
+  /// The caller must guarantee that the operation is redoable: the functor may
+  /// run several times before the transaction commits.
+  template <class Op> LIBC_INLINE void transaction(Op &&op) {
+    if constexpr (IsAtomic) {
+      for (;;) {
+        typename Impl::Atomic snapshot, next;
+        __atomic_load(&impl.atomic, &snapshot, __ATOMIC_RELAXED);
+        next.ptr = op(snapshot.ptr);
+        // Wrapping add for unsigned integers.
+        next.tag = snapshot.tag + 1;
+        if (__atomic_compare_exchange(&impl.atomic, &snapshot, &next, true,
+                                      __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
+          return;
+      }
+    } else {
+      // Acquire the lock.
+      while (__atomic_exchange_n(&impl.mutex.locked, true, __ATOMIC_ACQUIRE))
+        while (__atomic_load_n(&impl.mutex.locked, __ATOMIC_RELAXED))
+          LIBC_NAMESPACE::sleep_briefly();
+
+      impl.mutex.ptr = op(impl.mutex.ptr);
+      // Release the lock.
+      __atomic_store_n(&impl.mutex.locked, false, __ATOMIC_RELEASE);
+    }
+  }
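+
+  // Example use of transaction() (illustrative sketch only): push a node onto
+  // an intrusive list. Because the functor may be retried, it must not perform
+  // one-shot side effects; it should only derive the new head from the
+  // snapshot it is given, e.g.
+  //
+  //   list_head.transaction([node](Node *old_head) {
+  //     node->next = old_head;
+  //     return node;
+  //   });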
+
+  LIBC_INLINE T *get() const {
+    if constexpr (IsAtomic) {
+      // Weak micro-architectures typically regard mixed-size concurrent
+      // accesses (a partial-word load racing with a full-word access) as a
+      // race condition. While some implementations use such a racy read
+      // anyway, we load the whole word to avoid any complications.
+      typename Impl::Atomic snapshot;
+      __atomic_load(&impl.atomic, &snapshot, __ATOMIC_RELAXED);
+      return snapshot.ptr;
+    } else {
+      return impl.mutex.ptr;
+    }
+  }
+};
+
+template <class T> using AbaPtr = AbaPtrImpl<T, LIBC_ABA_PTR_IS_ATOMIC>;
+} // namespace LIBC_NAMESPACE_DECL
+
+#undef LIBC_ABA_PTR_IS_ATOMIC
+#endif // LLVM_LIBC_SRC___SUPPORT_ABA_PTR_H
diff --git a/libc/src/__support/mpmc_stack.h b/libc/src/__support/mpmc_stack.h
new file mode 100644
index 0000000000000..df235c2c1dfac
--- /dev/null
+++ b/libc/src/__support/mpmc_stack.h
@@ -0,0 +1,107 @@
+//===-- Simple Lock-free MPMC Stack ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC___SUPPORT_MPMC_STACK_H
+#define LLVM_LIBC_SRC___SUPPORT_MPMC_STACK_H
+
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/CPP/new.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/CPP/type_traits.h"
+#include "src/__support/aba_ptr.h"
+
+namespace LIBC_NAMESPACE_DECL {
+template <class T> class MPMCStack {
+  struct Node {
+    cpp::Atomic<size_t> visitor;
+    Node *next;
+    T value;
+
+    LIBC_INLINE Node(T val) : visitor(0), next(nullptr), value(val) {}
+  };
+  AbaPtr<Node> head;
+
+public:
+  static_assert(cpp::is_copy_constructible<T>::value,
+                "T must be copy constructible");
+  LIBC_INLINE constexpr MPMCStack() : head(nullptr) {}
+  LIBC_INLINE bool push(T value) {
+    AllocChecker ac;
+    Node *new_node = new (ac) Node(value);
+    if (!ac)
+      return false;
+    head.transaction([new_node](Node *old_head) {
+      new_node->next = old_head;
+      return new_node;
+    });
+    return true;
+  }
+  LIBC_INLINE bool push_all(T values[], size_t count) {
+    struct Guard {
+      size_t count;
+      Node **allocated;
+      LIBC_INLINE Guard(Node *allocated[]) : count(0), allocated(allocated) {}
+      LIBC_INLINE ~Guard() {
+        for (size_t i = 0; i < count; ++i)
+          delete allocated[i];
+      }
+      LIBC_INLINE void add(Node *node) { allocated[count++] = node; }
+      LIBC_INLINE void clear() { count = 0; }
+    };
+    // Variable-length arrays are a GNU extension.
+    __extension__ Node *allocated[count];
+    {
+      Guard guard(allocated);
+      for (size_t i = 0; i < count; ++i) {
+        AllocChecker ac;
+        Node *new_node = new (ac) Node(values[i]);
+        if (!ac)
+          return false;
+        guard.add(new_node);
+        if (i != 0)
+          new_node->next = allocated[i - 1];
+      }
+      guard.clear();
+    }
+    head.transaction([&allocated, count](Node *old_head) {
+      allocated[0]->next = old_head;
+      return allocated[count - 1];
+    });
+    return true;
+  }
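+  /// Pop the most recently pushed value, or return cpp::nullopt if the stack
+  /// is empty. A sketch of the intended usage (illustrative only):
+  ///
+  ///   MPMCStack<int> stack;
+  ///   if (stack.push(42))
+  ///     if (cpp::optional<int> value = stack.pop())
+  ///       consume(*value); // `consume` is a placeholder for user code.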
+  LIBC_INLINE cpp::optional<T> pop() {
+    cpp::optional<T> res = cpp::nullopt;
+    Node *node = nullptr;
+    head.transaction([&](Node *current_head) -> Node * {
+      if (!current_head) {
+        res = cpp::nullopt;
+        return nullptr;
+      }
+      node = current_head;
+      node->visitor.fetch_add(1);
+      res = cpp::optional<T>{node->value};
+      Node *next = node->next;
+      node->visitor.fetch_sub(1);
+      return next;
+    });
+    // On a successful transaction, we popped a node, so we must delete it. At
+    // this point no other thread can acquire a new reference to the node, but
+    // we still need to wait for threads inside the transaction that may be
+    // holding a reference to it.
+    if (res) {
+      // Spin until the node is no longer in use.
+      while (node->visitor.load() != 0)
+        LIBC_NAMESPACE::sleep_briefly();
+      delete node;
+    }
+    return res;
+  }
+};
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LLVM_LIBC_SRC___SUPPORT_MPMC_STACK_H
diff --git a/libc/src/stdlib/linux/CMakeLists.txt b/libc/src/stdlib/linux/CMakeLists.txt
index 1d3c00a5e0ddb..85ad2bdab33ba 100644
--- a/libc/src/stdlib/linux/CMakeLists.txt
+++ b/libc/src/stdlib/linux/CMakeLists.txt
@@ -9,3 +9,23 @@ add_entrypoint_object(
     libc.src.signal.raise
     libc.src.stdlib._Exit
 )
+
+add_header_library(
+  vdso_rng
+  HDRS
+    vdso_rng.h
+  DEPENDS
+    libc.src.__support.threads.thread # For __cxa_thread_atexit_impl
+    libc.src.__support.CPP.algorithm
+    libc.src.__support.CPP.bit
+    libc.src.__support.CPP.mutex
+    libc.src.__support.CPP.optional
+    libc.src.__support.OSUtil.linux.vdso
+    libc.src.__support.OSUtil.osutil
+    libc.src.__support.macros.config
+    libc.src.__support.mpmc_stack
+    libc.src.__support.threads.callonce
+    libc.src.__support.threads.linux.raw_mutex
+    libc.src.sys.auxv.getauxval # TODO: remove public entrypoint dependency
+    libc.include.sys_syscall
+)
diff --git a/libc/src/stdlib/linux/vdso_rng.h b/libc/src/stdlib/linux/vdso_rng.h
new file mode 100644
index 0000000000000..fdfa5b1872561
--- /dev/null
+++ b/libc/src/stdlib/linux/vdso_rng.h
@@ -0,0 +1,280 @@
+//===-- vDSO based RNG ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIBC_SRC_STDLIB_LINUX_VDSO_RNG_H
+#define LIBC_SRC_STDLIB_LINUX_VDSO_RNG_H
+
+#include "src/__support/CPP/algorithm.h"
+#include "src/__support/CPP/bit.h"
+#include "src/__support/CPP/mutex.h"
+#include "src/__support/CPP/optional.h"
+#include "src/__support/OSUtil/linux/vdso.h"
+#include "src/__support/OSUtil/syscall.h"
+#include "src/__support/common.h"
+#include "src/__support/macros/config.h"
+#include "src/__support/mpmc_stack.h"
+#include "src/__support/threads/callonce.h"
+#include "src/__support/threads/linux/raw_mutex.h"
+// TODO: this is a public entrypoint; we should remove this dependency later.
+#include "src/sys/auxv/getauxval.h"
+
+namespace LIBC_NAMESPACE_DECL {
+namespace vdso_rng {
+extern "C" {
+using Destructor = void(void *);
+[[gnu::weak]] extern void *__dso_handle;
+int __cxa_thread_atexit_impl(Destructor *, void *, void *);
+}
+class GlobalState {
+public:
+  struct VGetrandomOpaqueParams {
+    unsigned int size_of_opaque_states;
+    unsigned int mmap_prot;
+    unsigned int mmap_flags;
+    unsigned int reserved[13];
+  };
+
+  struct Config {
+    size_t page_size;
+    size_t pages_per_alloc;
+    size_t states_per_page;
+    vdso::VDSOSymType<vdso::VDSOSym::GetRandom> getrandom;
+    VGetrandomOpaqueParams params;
+  };
+
+private:
+  // A lock-free stack of free opaque states.
+  MPMCStack<void *> free_list{};
+  // A mutex protecting the allocation of new pages.
+  RawMutex allocation_mutex{};
+
+  // Shared global configuration.
+  static CallOnceFlag config_flag;
+  static Config config;
+
+  // We grow the pool of states in proportion to the number of CPUs. This
+  // function uses SYS_sched_getaffinity to obtain the CPU count.
+  LIBC_INLINE static size_t cpu_count();
+
+  // Grow the pool of available states. This function can fail if the system is
+  // out of memory.
+  // - This routine assumes that the global config is valid.
+  // - On success, this routine returns one opaque state for direct use.
+  LIBC_INLINE void *grow();
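+
+  // Lifecycle sketch: get() pops a free state from `free_list`; when the list
+  // is empty it falls back to grow(), which mmaps `pages_per_alloc` pages,
+  // slices them into `size_of_opaque_states`-byte chunks, pushes all but one
+  // onto `free_list`, and hands the reserved one to the caller. recycle()
+  // returns a state to `free_list` for reuse by other threads.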
+
+public:
+  LIBC_INLINE constexpr GlobalState() {}
+  LIBC_INLINE static const Config &get_config();
+  LIBC_INLINE static const Config &get_config_unchecked() { return config; }
+  LIBC_INLINE void *get();
+  LIBC_INLINE void recycle(void *state);
+};
+
+LIBC_INLINE_VAR GlobalState global_state{};
+
+class LocalState {
+  bool in_flight = false;
+  bool failed = false;
+  void *state = nullptr;
+
+public:
+  struct Guard {
+    LocalState *tls;
+    LIBC_INLINE Guard(LocalState *tls) : tls(tls) {
+      tls->in_flight = true;
+      cpp::atomic_thread_fence(cpp::MemoryOrder::SEQ_CST);
+    }
+    LIBC_INLINE Guard(Guard &&other) : tls(other.tls) { other.tls = nullptr; }
+    LIBC_INLINE ~Guard() {
+      cpp::atomic_thread_fence(cpp::MemoryOrder::SEQ_CST);
+      if (tls)
+        tls->in_flight = false;
+    }
+    LIBC_INLINE void fill(void *buf, size_t size) const;
+  };
+  LIBC_INLINE constexpr LocalState() {}
+  LIBC_INLINE cpp::optional<Guard> get() {
+    if (in_flight)
+      return cpp::nullopt;
+
+    Guard guard(this);
+
+    if (!failed && !state) {
+      int register_res = __cxa_thread_atexit_impl(
+          [](void *self) {
+            auto *tls = static_cast<LocalState *>(self);
+            // Reject all future attempts to get a state.
+            void *state = tls->state;
+            tls->in_flight = true;
+            tls->failed = true;
+            tls->state = nullptr;
+            cpp::atomic_thread_fence(cpp::MemoryOrder::SEQ_CST);
+            if (state)
+              LIBC_NAMESPACE::vdso_rng::global_state.recycle(state);
+          },
+          this, __dso_handle);
+      if (register_res == 0)
+        state = LIBC_NAMESPACE::vdso_rng::global_state.get();
+      if (!state)
+        failed = true;
+    }
+
+    if (!state)
+      return cpp::nullopt;
+
+    return cpp::move(guard);
+  }
+};
+
+LIBC_INLINE_VAR LIBC_THREAD_LOCAL LocalState local_state{};
+
+//===----------------------------------------------------------------------===//
+// Implementation
+//===----------------------------------------------------------------------===//
+
+LIBC_INLINE_VAR GlobalState::Config GlobalState::config{};
+LIBC_INLINE_VAR CallOnceFlag GlobalState::config_flag = 0;
+
+LIBC_INLINE size_t GlobalState::cpu_count() {
+  char cpu_set[128] = {0};
+  int res = LIBC_NAMESPACE::syscall_impl<int>(SYS_sched_getaffinity, 0,
+                                              sizeof(cpu_set), cpu_set);
+  if (res <= 0)
+    return 1;
+
+  size_t count = 0;
+  for (size_t i = 0; i < sizeof(cpu_set) / sizeof(unsigned long); ++i) {
+    unsigned long *mask_ptr = reinterpret_cast<unsigned long *>(cpu_set);
+    count += LIBC_NAMESPACE::cpp::popcount(mask_ptr[i]);
+  }
+
+  return count > 0 ? count : 1;
+}
+
+LIBC_INLINE const GlobalState::Config &GlobalState::get_config() {
+  callonce(&config_flag, []() {
+    config.getrandom =
+        LIBC_NAMESPACE::vdso::TypedSymbol<vdso::VDSOSym::GetRandom>{};
+    if (!config.getrandom)
+      return;
+
+    // Call with the special ~0 state size to query the kernel's desired
+    // configuration for the opaque states.
+    int res = config.getrandom(
+        /*buf=*/nullptr, /*count=*/0, /*flags=*/0,
+        /*opaque_states=*/&config.params,
+        /*size_of_opaque_states=*/~0);
+    if (res != 0)
+      return;
+
+    config.page_size = LIBC_NAMESPACE::getauxval(AT_PAGESZ);
+    if (!config.page_size)
+      return;
+
+    size_t count = cpp::max(cpu_count(), size_t{4});
+
+    config.states_per_page =
+        config.page_size / config.params.size_of_opaque_states;
+
+    config.pages_per_alloc =
+        count / config.states_per_page + (count % config.states_per_page != 0);
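+    // Worked example (hypothetical sizes): with 4096-byte pages, 256-byte
+    // opaque states and 16 CPUs, states_per_page is 16 and pages_per_alloc is
+    // 1; with 40 CPUs the division rounds up to 3 pages.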
+  });
+  return config;
+}
+
+LIBC_INLINE void *GlobalState::grow() {
+  cpp::lock_guard guard(allocation_mutex);
+
+  // It is possible that by the time we finally grab the lock, other threads
+  // have already finished the allocation. Hence, we first try to pop a state
+  // from the free list.
+  if (cpp::optional<void *> state = free_list.pop())
+    return *state;
+
+  long mmap_res = LIBC_NAMESPACE::syscall_impl<long>(
+      SYS_mmap, /*addr=*/nullptr,
+      /*length=*/config.page_size * config.pages_per_alloc,
+      /*prot=*/config.params.mmap_prot,
+      /*flags=*/config.params.mmap_flags,
+      /*fd=*/-1, /*offset=*/0);
+  if (mmap_res < 0 /* raw syscalls return a negative errno on failure */)
+    return nullptr;
+
+  char *pages = reinterpret_cast<char *>(mmap_res);
+
+  // Collect the opaque states carved out of the newly mapped pages.
+  size_t total_states = config.pages_per_alloc * config.states_per_page;
+  size_t free_states = total_states - 1; // reserve one for direct use.
+  __extension__ void *opaque_states[total_states];
+  size_t index = 0;
+  for (size_t p = 0; p < config.pages_per_alloc; ++p) {
+    char *page = &pages[p * config.page_size];
+    for (size_t s = 0; s < config.states_per_page; ++s) {
+      void *state = &page[s * config.params.size_of_opaque_states];
+      opaque_states[index++] = state;
+    }
+  }
+
+  constexpr size_t RETRY_COUNT = 64;
+  for (size_t i = 0; i < RETRY_COUNT; ++i) {
+    if (free_list.push_all(opaque_states, free_states))
+      break;
+    // Give up if we are still short on memory after all these retries.
+    if (i + 1 == RETRY_COUNT) {
+      LIBC_NAMESPACE::syscall_impl<long>(
+          SYS_munmap, pages, config.page_size * config.pages_per_alloc);
+      return nullptr;
+    }
+  }
+
+  return opaque_states[free_states];
+}
+
+LIBC_INLINE void *GlobalState::get() {
+  const Config &config = get_config();
+  // If the page size is not set, the global config is invalid; return early.
+  if (!config.page_size)
+    return nullptr;
+
+  if (cpp::optional<void *> state = free_list.pop())
+    return *state;
+
+  // At this stage, we know that the config is valid.
+  return grow();
+}
+
+LIBC_INLINE void GlobalState::recycle(void *state) {
+  LIBC_ASSERT(state != nullptr);
+  constexpr size_t RETRY_COUNT = 64;
+  for (size_t i = 0; i < RETRY_COUNT; ++i)
+    if (free_list.push(state))
+      return;
+  // Otherwise, we simply let the state leak. Not reusing it is not too bad:
+  // the kernel can reclaim the backing page when memory is tight.
+}
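+
+// How an entrypoint is expected to put the pieces together (illustrative
+// sketch only; no entrypoint is wired up in this patch):
+//
+//   if (cpp::optional<LocalState::Guard> guard = local_state.get()) {
+//     guard->fill(buf, len); // vDSO fast path.
+//   } else {
+//     // Fall back to the getrandom syscall.
+//   }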
+
+//===----------------------------------------------------------------------===//
+// LocalState
+//===----------------------------------------------------------------------===//
+
+LIBC_INLINE void LocalState::Guard::fill(void *buf, size_t size) const {
+  LIBC_ASSERT(tls->state != nullptr);
+  char *cursor = reinterpret_cast<char *>(buf);
+  size_t remaining = size;
+  const auto &config = GlobalState::get_config_unchecked();
+  while (remaining > 0) {
+    int res = config.getrandom(cursor, remaining, /* default random flag */ 0,
+                               tls->state, config.params.size_of_opaque_states);
+    if (res < 0)
+      continue;
+    remaining -= static_cast<size_t>(res);
+    cursor += res;
+  }
+}
+
+} // namespace vdso_rng
+} // namespace LIBC_NAMESPACE_DECL
+
+#endif // LIBC_SRC_STDLIB_LINUX_VDSO_RNG_H
diff --git a/libc/test/integration/src/__support/CMakeLists.txt b/libc/test/integration/src/__support/CMakeLists.txt
index b5b6557e8d689..93f54083f3c00 100644
--- a/libc/test/integration/src/__support/CMakeLists.txt
+++ b/libc/test/integration/src/__support/CMakeLists.txt
@@ -2,3 +2,18 @@ add_subdirectory(threads)
 if(LIBC_TARGET_OS_IS_GPU)
   add_subdirectory(GPU)
 endif()
+
+add_libc_integration_test_suite(libc-support-integration-tests)
+
+add_integration_test(
+  mpmc_stack_test
+  SUITE
+    libc-support-integration-tests
+  SRCS
+    mpmc_stack_test.cpp
+  DEPENDS
+    libc.src.__support.mpmc_stack
+    libc.src.__support.threads.thread
+    libc.src.pthread.pthread_create
+    libc.src.pthread.pthread_join
+)
diff --git a/libc/test/integration/src/__support/mpmc_stack_test.cpp b/libc/test/integration/src/__support/mpmc_stack_test.cpp
new file mode 100644
index 0000000000000..9166a816a74fe
--- /dev/null
+++ b/libc/test/integration/src/__support/mpmc_stack_test.cpp
@@ -0,0 +1,119 @@
+#include "src/__support/CPP/atomic.h"
+#include "src/__support/mpmc_stack.h"
+#include "src/pthread/pthread_create.h"
+#include "src/pthread/pthread_join.h"
+#include "test/IntegrationTest/test.h"
+
+using namespace LIBC_NAMESPACE;
+
+void smoke_test() {
+  MPMCStack<int> stack;
+  for (int i = 0; i <= 100; ++i)
+    if (!stack.push(i))
+      __builtin_trap();
+  for (int i = 100; i >= 0; --i)
+    if (*stack.pop() != i)
+      __builtin_trap();
+  if (stack.pop())
+    __builtin_trap(); // Should be empty now.
+}
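+
+// NUM_THREADS workers claim values from a shared counter and push them; each
+// worker then drains the stack, marking every value it pops in `flags`. After
+// joining, the main thread pops any leftovers and verifies that every value in
+// [0, NUM_PUSHES) was popped by someone.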
+
+void multithread_test() {
+  constexpr static size_t NUM_THREADS = 5;
+  constexpr static size_t NUM_PUSHES = 100;
+  struct State {
+    MPMCStack<size_t> stack;
+    cpp::Atomic<size_t> counter = 0;
+    cpp::Atomic<bool> flags[NUM_PUSHES];
+  } state;
+  pthread_t threads[NUM_THREADS];
+  for (size_t i = 0; i < NUM_THREADS; ++i) {
+    LIBC_NAMESPACE::pthread_create(
+        &threads[i], nullptr,
+        [](void *arg) -> void * {
+          State *state = static_cast<State *>(arg);
+          for (;;) {
+            size_t current = state->counter.fetch_add(1);
+            if (current >= NUM_PUSHES)
+              break;
+            if (!state->stack.push(current))
+              __builtin_trap();
+          }
+          while (auto res = state->stack.pop())
+            state->flags[res.value()].store(true);
+          return nullptr;
+        },
+        &state);
+  }
+  for (pthread_t thread : threads)
+    LIBC_NAMESPACE::pthread_join(thread, nullptr);
+  while (cpp::optional<size_t> res = state.stack.pop())
+    state.flags[res.value()].store(true);
+  for (size_t i = 0; i < NUM_PUSHES; ++i)
+    if (!state.flags[i].load())
+      __builtin_trap();
+}
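+
+// Same invariant as multithread_test, but every producer inserts its values in
+// batches of BATCH_SIZE through push_all, exercising the multi-node linking
+// path of the stack.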
+
+void multithread_push_all_test() {
+  constexpr static size_t NUM_THREADS = 4;
+  constexpr static size_t BATCH_SIZE = 10;
+  constexpr static size_t NUM_BATCHES = 20;
+  struct State {
+    MPMCStack<size_t> stack;
+    cpp::Atomic<size_t> counter = 0;
+    cpp::Atomic<bool> flags[NUM_THREADS * BATCH_SIZE * NUM_BATCHES];
+  } state;
+  pthread_t threads[NUM_THREADS];
+
+  for (size_t i = 0; i < NUM_THREADS; ++i) {
+    LIBC_NAMESPACE::pthread_create(
+        &threads[i], nullptr,
+        [](void *arg) -> void * {
+          State *state = static_cast<State *>(arg);
+          size_t values[BATCH_SIZE];
+
+          for (size_t batch = 0; batch < NUM_BATCHES; ++batch) {
+            // Prepare a batch of values.
+            for (size_t j = 0; j < BATCH_SIZE; ++j) {
+              size_t current = state->counter.fetch_add(1);
+              values[j] = current;
+            }
+
+            // Push all values in the batch at once.
+            if (!state->stack.push_all(values, BATCH_SIZE))
+              __builtin_trap();
+          }
+
+          // Pop and mark all values.
+          while (auto res = state->stack.pop()) {
+            size_t value = res.value();
+            if (value < NUM_THREADS * BATCH_SIZE * NUM_BATCHES)
+              state->flags[value].store(true);
+          }
+          return nullptr;
+        },
+        &state);
+  }
+
+  for (pthread_t thread : threads)
+    LIBC_NAMESPACE::pthread_join(thread, nullptr);
+
+  // Pop any remaining values.
+  while (cpp::optional<size_t> res = state.stack.pop()) {
+    size_t value = res.value();
+    if (value < NUM_THREADS * BATCH_SIZE * NUM_BATCHES)
+      state.flags[value].store(true);
+  }
+
+  // Verify that all values were processed.
+  for (size_t i = 0; i < NUM_THREADS * BATCH_SIZE * NUM_BATCHES; ++i)
+    if (!state.flags[i].load())
+      __builtin_trap();
+}
+
+TEST_MAIN() {
+  smoke_test();
+  multithread_test();
+  multithread_push_all_test();
+  return 0;
+}
diff --git a/libc/test/integration/src/stdlib/linux/CMakeLists.txt b/libc/test/integration/src/stdlib/linux/CMakeLists.txt
new file mode 100644
index 0000000000000..c81ce09f67456
--- /dev/null
+++ b/libc/test/integration/src/stdlib/linux/CMakeLists.txt
@@ -0,0 +1,14 @@
+add_custom_target(stdlib-linux-integration-tests)
+add_dependencies(libc-integration-tests stdlib-linux-integration-tests)
+
+add_integration_test(
+  vdso_rng_test
+  SUITE
+    stdlib-linux-integration-tests
+  SRCS
+    vdso_rng_test.cpp
+  DEPENDS
+    libc.src.pthread.pthread_create
+    libc.src.pthread.pthread_join
+    libc.src.stdlib.linux.vdso_rng
+)
diff --git a/libc/test/integration/src/stdlib/linux/vdso_rng_test.cpp b/libc/test/integration/src/stdlib/linux/vdso_rng_test.cpp
new file mode 100644
index 0000000000000..b16d0cd8e896b
--- /dev/null
+++ b/libc/test/integration/src/stdlib/linux/vdso_rng_test.cpp
@@ -0,0 +1,68 @@
+//===-- Test for vdso_rng functionality -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/pthread/pthread_create.h"
+#include "src/pthread/pthread_join.h"
+#include "src/stdlib/linux/vdso_rng.h"
+#include "test/IntegrationTest/test.h"
+
+using namespace LIBC_NAMESPACE;
+
+void basic_test() {
+  // Test the basic functionality.
+  vdso_rng::LocalState &local_state = vdso_rng::local_state;
+
+  // Try to get a guard.
+  if (auto guard = local_state.get()) {
+    // Fill a small buffer with random data.
+    char buffer[256]{};
+    guard->fill(buffer, sizeof(buffer));
+
+    // Basic sanity check: count zero bytes. With a uniform distribution we
+    // expect about one zero byte per 256 bytes, so seeing more than 16 is
+    // extremely unlikely for a working RNG.
+    size_t zero_count = 0;
+    for (auto &i : buffer)
+      if (i == 0)
+        zero_count++;
+
+    if (zero_count > 16)
+      __builtin_trap();
+  }
+  // If we cannot get a guard, that is okay: the vDSO function might not be
+  // available, or the system might not support vgetrandom.
+}
+
+void multithread_test() {
+  constexpr static size_t OUTER_REPEAT = 8;
+  constexpr static size_t INNER_REPEAT = 32;
+  constexpr static size_t NUM_THREADS = 16;
+  pthread_t threads[NUM_THREADS];
+
+  // Repeat the outer loop so that states released by exiting threads are
+  // recycled into the global free list and picked up again by the threads of
+  // the next round.
+  for (size_t r = 0; r < OUTER_REPEAT; ++r) {
+    for (pthread_t &thread : threads)
+      LIBC_NAMESPACE::pthread_create(
+          &thread, nullptr,
+          [](void *) -> void * {
+            for (size_t j = 0; j < INNER_REPEAT; ++j)
+              basic_test();
+            return nullptr;
+          },
+          nullptr);
+    for (pthread_t thread : threads)
+      LIBC_NAMESPACE::pthread_join(thread, nullptr);
+  }
+}
+
+TEST_MAIN() {
+  basic_test();
+  multithread_test();
+  return 0;
+}