diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp index 302701e1cad09..a2b43d80746e8 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp @@ -275,7 +275,7 @@ address BarrierSetAssembler::patching_epoch_addr() { } void BarrierSetAssembler::increment_patching_epoch() { - Atomic::inc(&_patching_epoch); + AtomicAccess::inc(&_patching_epoch); } void BarrierSetAssembler::clear_patching_epoch() { diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp index 3a4ba913a8f90..4d5ca01b6b497 100644 --- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp @@ -112,22 +112,22 @@ class NativeNMethodBarrier { } int get_value() { - return Atomic::load_acquire(guard_addr()); + return AtomicAccess::load_acquire(guard_addr()); } void set_value(int value, int bit_mask) { if (bit_mask == ~0) { - Atomic::release_store(guard_addr(), value); + AtomicAccess::release_store(guard_addr(), value); return; } assert((value & ~bit_mask) == 0, "trying to set bits outside the mask"); value &= bit_mask; - int old_value = Atomic::load(guard_addr()); + int old_value = AtomicAccess::load(guard_addr()); while (true) { // Only bits in the mask are changed int new_value = value | (old_value & ~bit_mask); if (new_value == old_value) break; - int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release); + int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release); if (v == old_value) break; old_value = v; } diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp index fa7329f49428f..3f1a9f7daaacb 100644 --- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp @@ -42,7 +42,7 @@ #include "prims/methodHandles.hpp" #include "prims/upcallLinker.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/continuationEntry.inline.hpp" #include "runtime/frame.inline.hpp" @@ -10265,7 +10265,7 @@ class StubGenerator: public StubCodeGenerator { #if defined (LINUX) && !defined (__ARM_FEATURE_ATOMICS) - // ARMv8.1 LSE versions of the atomic stubs used by Atomic::PlatformXX. + // ARMv8.1 LSE versions of the atomic stubs used by AtomicAccess::PlatformXX. // // If LSE is in use, generate LSE versions of all the stubs. The // non-LSE versions are in atomic_aarch64.S. 
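The nMethod entry-barrier hunks above (aarch64 here, with the arm/riscv/s390/x86 equivalents further down) all rename one and the same guard-update idiom. As a minimal standalone sketch, using only the AtomicAccess calls that appear in these hunks (the helper name set_guard_bits is invented for illustration; in the patch this logic lives in each port's barrier class, e.g. NativeNMethodBarrier::set_value):

    // Sketch of the guard-update loop used by the nMethod barriers above.
    // Only the bits selected by bit_mask are replaced; all other bits are
    // preserved by retrying the CAS with the freshly observed value.
    static void set_guard_bits(volatile int* guard_addr, int value, int bit_mask) {
      int old_value = AtomicAccess::load(guard_addr);
      while (true) {
        int new_value = (value & bit_mask) | (old_value & ~bit_mask);
        if (new_value == old_value) break;               // nothing to change
        int v = AtomicAccess::cmpxchg(guard_addr, old_value, new_value,
                                      memory_order_release);
        if (v == old_value) break;                       // our CAS took effect
        old_value = v;                                   // lost the race; retry
      }
    }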
diff --git a/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp b/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp index 81b14f28c35be..2c93b5ac549e3 100644 --- a/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp +++ b/src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp @@ -48,22 +48,22 @@ class NativeNMethodBarrier: public NativeInstruction { public: int get_value() { - return Atomic::load_acquire(guard_addr()); + return AtomicAccess::load_acquire(guard_addr()); } void set_value(int value, int bit_mask) { if (bit_mask == ~0) { - Atomic::release_store(guard_addr(), value); + AtomicAccess::release_store(guard_addr(), value); return; } assert((value & ~bit_mask) == 0, "trying to set bits outside the mask"); value &= bit_mask; - int old_value = Atomic::load(guard_addr()); + int old_value = AtomicAccess::load(guard_addr()); while (true) { // Only bits in the mask are changed int new_value = value | (old_value & ~bit_mask); if (new_value == old_value) break; - int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release); + int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release); if (v == old_value) break; old_value = v; } diff --git a/src/hotspot/cpu/arm/stubGenerator_arm.cpp b/src/hotspot/cpu/arm/stubGenerator_arm.cpp index b81400ae877f7..2e2e0f7a4b969 100644 --- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp +++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp @@ -421,7 +421,8 @@ class StubGenerator: public StubCodeGenerator { } - // As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as: + // As per atomicAccess.hpp the atomic read-modify-write operations must be + // logically implemented as: // <fence>; <op>; <fence> // But for load-linked/store-conditional based systems a fence here simply means // no load/store can be reordered with respect to the initial load-linked, so we have: @@ -440,7 +441,7 @@ class StubGenerator: public StubCodeGenerator { // be removed in the future.
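The stubGenerator_arm comment above spells out the contract that every atomic read-modify-write primitive must behave as if bracketed by full fences. A caller-level sketch of that two-way-barrier shape, assuming the renamed AtomicAccess API (illustrative only; the real ports fold the barriers into the ll/sc or lock-prefixed instruction sequence rather than issuing separate fence calls):

    // Logical shape of a conservatively ordered read-modify-write:
    // <fence>; <op>; <fence>.  Illustrative only.
    inline int add_then_fetch_conservative(volatile int* dest, int add_value) {
      OrderAccess::fence();                              // leading <fence>
      int result = AtomicAccess::add(dest, add_value,    // the <op> itself
                                     memory_order_relaxed);
      OrderAccess::fence();                              // trailing <fence>
      return result;
    }

In practice callers simply write AtomicAccess::add(dest, add_value) and rely on the default memory_order_conservative ordering.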
// Implementation of atomic_add(jint add_value, volatile jint* dest) - // used by Atomic::add(volatile jint* dest, jint add_value) + // used by AtomicAccess::add(volatile jint* dest, jint add_value) // // Arguments : // @@ -492,7 +493,7 @@ class StubGenerator: public StubCodeGenerator { } // Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest) - // used by Atomic::add(volatile jint* dest, jint exchange_value) + // used by AtomicAccess::add(volatile jint* dest, jint exchange_value) // // Arguments : // @@ -542,7 +543,7 @@ class StubGenerator: public StubCodeGenerator { } // Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value) - // used by Atomic::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value) + // used by AtomicAccess::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value) // // Arguments : // @@ -582,7 +583,7 @@ class StubGenerator: public StubCodeGenerator { return start; } - // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value) + // Support for jlong AtomicAccess::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value) // reordered before by a wrapper to (jlong compare_value, jlong exchange_value, volatile jlong *dest) // // Arguments : diff --git a/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp b/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp index 02423e13308e6..7aca2c9db45a9 100644 --- a/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp @@ -73,7 +73,7 @@ class NativeNMethodBarrier: public NativeInstruction { u_char buf[NativeMovRegMem::instruction_size]; uint64_t u64; } new_mov_instr, old_mov_instr; - new_mov_instr.u64 = old_mov_instr.u64 = Atomic::load(instr); + new_mov_instr.u64 = old_mov_instr.u64 = AtomicAccess::load(instr); while (true) { // Only bits in the mask are changed int old_value = nativeMovRegMem_at(old_mov_instr.buf)->offset(); @@ -81,7 +81,7 @@ class NativeNMethodBarrier: public NativeInstruction { if (new_value == old_value) return; // skip icache flush if nothing changed nativeMovRegMem_at(new_mov_instr.buf)->set_offset(new_value, false /* no icache flush */); // Swap in the new value - uint64_t v = Atomic::cmpxchg(instr, old_mov_instr.u64, new_mov_instr.u64, memory_order_relaxed); + uint64_t v = AtomicAccess::cmpxchg(instr, old_mov_instr.u64, new_mov_instr.u64, memory_order_relaxed); if (v == old_mov_instr.u64) break; old_mov_instr.u64 = v; } diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp index ca492329729c0..4e93992f4134a 100644 --- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp +++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp @@ -347,7 +347,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) // Finally patch out the jump. volatile juint *jump_addr = (volatile juint*)instr_addr; // Release not needed because caller uses invalidate_range after copying the remaining bytes. 
- //Atomic::release_store(jump_addr, *((juint*)code_buffer)); + //AtomicAccess::release_store(jump_addr, *((juint*)code_buffer)); *jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size); } diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp index 387db778c1fd1..f5916000890c4 100644 --- a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp @@ -217,7 +217,7 @@ address BarrierSetAssembler::patching_epoch_addr() { } void BarrierSetAssembler::increment_patching_epoch() { - Atomic::inc(&_patching_epoch); + AtomicAccess::inc(&_patching_epoch); } void BarrierSetAssembler::clear_patching_epoch() { diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp index 4fa9b4b04fb46..5003b9584a318 100644 --- a/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp @@ -106,22 +106,22 @@ class NativeNMethodBarrier { } int get_value() { - return Atomic::load_acquire(guard_addr()); + return AtomicAccess::load_acquire(guard_addr()); } void set_value(int value, int bit_mask) { if (bit_mask == ~0) { - Atomic::release_store(guard_addr(), value); + AtomicAccess::release_store(guard_addr(), value); return; } assert((value & ~bit_mask) == 0, "trying to set bits outside the mask"); value &= bit_mask; - int old_value = Atomic::load(guard_addr()); + int old_value = AtomicAccess::load(guard_addr()); while (true) { // Only bits in the mask are changed int new_value = value | (old_value & ~bit_mask); if (new_value == old_value) break; - int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release); + int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release); if (v == old_value) break; old_value = v; } diff --git a/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp b/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp index 1a609ad8d451a..8f43f4ef723a5 100644 --- a/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp +++ b/src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp @@ -64,12 +64,12 @@ class NativeMethodBarrier: public NativeInstruction { assert((value & ~bit_mask) == 0, "trying to set bits outside the mask"); value &= bit_mask; int32_t* data_addr = (int32_t*)get_patchable_data_address(); - int old_value = Atomic::load(data_addr); + int old_value = AtomicAccess::load(data_addr); while (true) { // Only bits in the mask are changed int new_value = value | (old_value & ~bit_mask); if (new_value == old_value) break; - int v = Atomic::cmpxchg(data_addr, old_value, new_value, memory_order_release); + int v = AtomicAccess::cmpxchg(data_addr, old_value, new_value, memory_order_release); if (v == old_value) break; old_value = v; } diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp index 124daef4fa7f3..40311f746ea3d 100644 --- a/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp @@ -65,12 +65,12 @@ class NativeNMethodCmpBarrier: public NativeInstruction { assert(align_up(immediate_address(), sizeof(jint)) == align_down(immediate_address(), sizeof(jint)), "immediate not aligned"); jint* data_addr = 
(jint*)immediate_address(); - jint old_value = Atomic::load(data_addr); + jint old_value = AtomicAccess::load(data_addr); while (true) { // Only bits in the mask are changed jint new_value = imm | (old_value & ~bit_mask); if (new_value == old_value) break; - jint v = Atomic::cmpxchg(data_addr, old_value, new_value, memory_order_release); + jint v = AtomicAccess::cmpxchg(data_addr, old_value, new_value, memory_order_release); if (v == old_value) break; old_value = v; } diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp index aa119210f47b8..640ba383d0ef6 100644 --- a/src/hotspot/os/aix/os_aix.cpp +++ b/src/hotspot/os/aix/os_aix.cpp @@ -43,7 +43,7 @@ #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/globals_extension.hpp" #include "runtime/interfaceSupport.inline.hpp" diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp index 4f5fed2c8c044..0387635a7b93b 100644 --- a/src/hotspot/os/bsd/os_bsd.cpp +++ b/src/hotspot/os/bsd/os_bsd.cpp @@ -39,7 +39,7 @@ #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/globals_extension.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -809,7 +809,7 @@ jlong os::javaTimeNanos() { if (now <= prev) { return prev; // same or retrograde time; } - const uint64_t obsv = Atomic::cmpxchg(&Bsd::_max_abstime, prev, now); + const uint64_t obsv = AtomicAccess::cmpxchg(&Bsd::_max_abstime, prev, now); assert(obsv >= prev, "invariant"); // Monotonicity // If the CAS succeeded then we're done and return "now". 
// If the CAS failed and the observed value "obsv" is >= now then @@ -2135,14 +2135,14 @@ uint os::processor_id() { __asm__ ("cpuid\n\t" : "+a" (eax), "+b" (ebx), "+c" (ecx), "+d" (edx) : ); uint apic_id = (ebx >> 24) & (processor_id_map_size - 1); - int processor_id = Atomic::load(&processor_id_map[apic_id]); + int processor_id = AtomicAccess::load(&processor_id_map[apic_id]); while (processor_id < 0) { // Assign processor id to APIC id - processor_id = Atomic::cmpxchg(&processor_id_map[apic_id], processor_id_unassigned, processor_id_assigning); + processor_id = AtomicAccess::cmpxchg(&processor_id_map[apic_id], processor_id_unassigned, processor_id_assigning); if (processor_id == processor_id_unassigned) { - processor_id = Atomic::fetch_then_add(&processor_id_next, 1) % os::processor_count(); - Atomic::store(&processor_id_map[apic_id], processor_id); + processor_id = AtomicAccess::fetch_then_add(&processor_id_next, 1) % os::processor_count(); + AtomicAccess::store(&processor_id_map[apic_id], processor_id); } } diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index d133813feb07e..f98586b5e70c8 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -42,7 +42,7 @@ #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/globals_extension.hpp" #include "runtime/init.hpp" @@ -4783,8 +4783,8 @@ static bool should_warn_invalid_processor_id() { static volatile int warn_once = 1; - if (Atomic::load(&warn_once) == 0 || - Atomic::xchg(&warn_once, 0) == 0) { + if (AtomicAccess::load(&warn_once) == 0 || + AtomicAccess::xchg(&warn_once, 0) == 0) { // Don't warn more than once return false; } diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp index 8f188ef002fe9..1a04cbba0ded1 100644 --- a/src/hotspot/os/posix/os_posix.cpp +++ b/src/hotspot/os/posix/os_posix.cpp @@ -31,7 +31,7 @@ #include "nmt/memTracker.hpp" #include "os_posix.inline.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.inline.hpp" #include "runtime/globals_extension.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -1691,7 +1691,7 @@ void PlatformEvent::park() { // AKA "down()" // atomically decrement _event for (;;) { v = _event; - if (Atomic::cmpxchg(&_event, v, v - 1) == v) break; + if (AtomicAccess::cmpxchg(&_event, v, v - 1) == v) break; } guarantee(v >= 0, "invariant"); @@ -1738,7 +1738,7 @@ int PlatformEvent::park_nanos(jlong nanos) { // atomically decrement _event for (;;) { v = _event; - if (Atomic::cmpxchg(&_event, v, v - 1) == v) break; + if (AtomicAccess::cmpxchg(&_event, v, v - 1) == v) break; } guarantee(v >= 0, "invariant"); @@ -1794,7 +1794,7 @@ void PlatformEvent::unpark() { // but only in the correctly written condition checking loops of ObjectMonitor, // Mutex/Monitor, and JavaThread::sleep - if (Atomic::xchg(&_event, 1) >= 0) return; + if (AtomicAccess::xchg(&_event, 1) >= 0) return; int status = pthread_mutex_lock(_mutex); assert_status(status == 0, status, "mutex_lock"); @@ -1847,9 +1847,9 @@ void Parker::park(bool isAbsolute, jlong time) { // Optional fast-path check: // Return immediately if a permit is available. 
- // We depend on Atomic::xchg() having full barrier semantics + // We depend on AtomicAccess::xchg() having full barrier semantics // since we are doing a lock-free update to _counter. - if (Atomic::xchg(&_counter, 0) > 0) return; + if (AtomicAccess::xchg(&_counter, 0) > 0) return; JavaThread *jt = JavaThread::current(); diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp index 0157d354f4027..886bd45341574 100644 --- a/src/hotspot/os/posix/signals_posix.cpp +++ b/src/hotspot/os/posix/signals_posix.cpp @@ -28,7 +28,7 @@ #include "jvm.h" #include "logging/log.hpp" #include "os_posix.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" @@ -356,7 +356,7 @@ static void jdk_misc_signal_init() { void os::signal_notify(int sig) { if (sig_semaphore != nullptr) { - Atomic::inc(&pending_signals[sig]); + AtomicAccess::inc(&pending_signals[sig]); sig_semaphore->signal(); } else { // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init @@ -369,7 +369,7 @@ static int check_pending_signals() { for (;;) { for (int i = 0; i < NSIG + 1; i++) { jint n = pending_signals[i]; - if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) { + if (n > 0 && n == AtomicAccess::cmpxchg(&pending_signals[i], n, n - 1)) { return i; } } diff --git a/src/hotspot/os/posix/suspendResume_posix.cpp b/src/hotspot/os/posix/suspendResume_posix.cpp index dbd0e791d77af..01f67a981a85b 100644 --- a/src/hotspot/os/posix/suspendResume_posix.cpp +++ b/src/hotspot/os/posix/suspendResume_posix.cpp @@ -22,7 +22,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "suspendResume_posix.hpp" /* try to switch state from state "from" to state "to" @@ -31,7 +31,7 @@ SuspendResume::State SuspendResume::switch_state(SuspendResume::State from, SuspendResume::State to) { - SuspendResume::State result = Atomic::cmpxchg(&_state, from, to); + SuspendResume::State result = AtomicAccess::cmpxchg(&_state, from, to); if (result == from) { // success return to; diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 09d8b542a1043..cbf30e082e52d 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -42,7 +42,7 @@ #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/globals_extension.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -2462,7 +2462,7 @@ static void jdk_misc_signal_init() { void os::signal_notify(int sig) { if (sig_sem != nullptr) { - Atomic::inc(&pending_signals[sig]); + AtomicAccess::inc(&pending_signals[sig]); sig_sem->signal(); } else { // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init @@ -2475,7 +2475,7 @@ static int check_pending_signals() { while (true) { for (int i = 0; i < NSIG + 1; i++) { jint n = pending_signals[i]; - if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) { + if (n > 0 && n == AtomicAccess::cmpxchg(&pending_signals[i], n, n - 1)) { return i; } } @@ -4298,15 +4298,15 @@ static void exit_process_or_thread(Ept what, int exit_code) { // The first thread that reached this point, initializes the critical section. 
if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, nullptr)) { warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); - } else if (Atomic::load_acquire(&process_exiting) == 0) { + } else if (AtomicAccess::load_acquire(&process_exiting) == 0) { if (what != EPT_THREAD) { // Atomically set process_exiting before the critical section // to increase the visibility between racing threads. - Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId()); + AtomicAccess::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId()); } EnterCriticalSection(&crit_sect); - if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) { + if (what == EPT_THREAD && AtomicAccess::load_acquire(&process_exiting) == 0) { // Remove from the array those handles of the threads that have completed exiting. for (i = 0, j = 0; i < handle_count; ++i) { res = WaitForSingleObject(handles[i], 0 /* don't wait */); @@ -4419,7 +4419,7 @@ static void exit_process_or_thread(Ept what, int exit_code) { } if (!registered && - Atomic::load_acquire(&process_exiting) != 0 && + AtomicAccess::load_acquire(&process_exiting) != 0 && process_exiting != GetCurrentThreadId()) { // Some other thread is about to call exit(), so we don't let // the current unregistered thread proceed to exit() or _endthreadex() @@ -5585,7 +5585,7 @@ int PlatformEvent::park(jlong Millis) { int v; for (;;) { v = _Event; - if (Atomic::cmpxchg(&_Event, v, v-1) == v) break; + if (AtomicAccess::cmpxchg(&_Event, v, v-1) == v) break; } guarantee((v == 0) || (v == 1), "invariant"); if (v != 0) return OS_OK; @@ -5648,7 +5648,7 @@ void PlatformEvent::park() { int v; for (;;) { v = _Event; - if (Atomic::cmpxchg(&_Event, v, v-1) == v) break; + if (AtomicAccess::cmpxchg(&_Event, v, v-1) == v) break; } guarantee((v == 0) || (v == 1), "invariant"); if (v != 0) return; @@ -5695,7 +5695,7 @@ void PlatformEvent::unpark() { // from the first park() call after an unpark() call which will help // shake out uses of park() and unpark() without condition variables. - if (Atomic::xchg(&_Event, 1) >= 0) return; + if (AtomicAccess::xchg(&_Event, 1) >= 0) return; ::SetEvent(_ParkHandle); } diff --git a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp index 722dffc150d0d..d32f7c93ecf0f 100644 --- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp +++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -93,7 +93,7 @@ inline void post_membar(atomic_memory_order order) { template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -105,8 +105,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -131,8 +131,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); @@ -156,9 +156,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { // Note that xchg doesn't necessarily do an acquire // (see synchronizer.cpp). @@ -195,9 +195,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); // Note that xchg doesn't necessarily do an acquire // (see synchronizer.cpp). @@ -235,15 +235,15 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(1 == sizeof(T)); // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not - // specified otherwise (see atomic.hpp). + // specified otherwise (see atomicAccess.hpp). // Using 32 bit internally. volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3); @@ -305,15 +305,15 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not - // specified otherwise (see atomic.hpp). + // specified otherwise (see atomicAccess.hpp). 
T old_value; const uint64_t zero = 0; @@ -355,15 +355,15 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not - // specified otherwise (see atomic.hpp). + // specified otherwise (see atomicAccess.hpp). T old_value; const uint64_t zero = 0; @@ -404,10 +404,10 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, } template -struct Atomic::PlatformOrderedLoad { +struct AtomicAccess::PlatformOrderedLoad { template T operator()(const volatile T* p) const { - T t = Atomic::load(p); + T t = AtomicAccess::load(p); // Use twi-isync for load_acquire (faster than lwsync). __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory"); return t; diff --git a/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp index 14c093de8dd98..1ecdd59f59e7e 100644 --- a/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp +++ b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -34,7 +34,7 @@ // See https://patchwork.kernel.org/patch/3575821/ template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { if (order == memory_order_relaxed) { @@ -54,9 +54,9 @@ struct Atomic::PlatformAdd { template template -inline T Atomic::PlatformXchg::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(byte_size == sizeof(T)); T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE); FULL_MEM_BARRIER; @@ -65,10 +65,10 @@ inline T Atomic::PlatformXchg::operator()(T volatile* dest, template template -inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(byte_size == sizeof(T)); if (order == memory_order_conservative) { T value = compare_value; @@ -109,21 +109,21 @@ inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest, } template -struct Atomic::PlatformOrderedLoad +struct AtomicAccess::PlatformOrderedLoad { template T operator()(const volatile T* p) const { T data; __atomic_load(const_cast(p), &data, __ATOMIC_ACQUIRE); return data; } }; template -struct Atomic::PlatformOrderedStore +struct AtomicAccess::PlatformOrderedStore { template void operator()(volatile T* p, T v) const { __atomic_store(const_cast(p), &v, __ATOMIC_RELEASE); } }; template -struct Atomic::PlatformOrderedStore +struct AtomicAccess::PlatformOrderedStore { template void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); } diff --git a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp index 9ba246f553d88..8fbc319e766e9 100644 --- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp +++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ // Implementation of class atomic template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const; @@ -40,8 +40,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, - atomic_memory_order /* order */) const { +inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); D old_value; @@ -54,9 +54,9 @@ inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(4 == sizeof(T)); __asm__ volatile ( "xchgl (%2),%0" : "=r" (exchange_value) @@ -67,10 +67,10 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(1 == sizeof(T)); __asm__ volatile ( "lock cmpxchgb %1,(%3)" : "=a" (exchange_value) @@ -81,10 +81,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(4 == sizeof(T)); __asm__ volatile ( "lock cmpxchgl %1,(%3)" : "=a" (exchange_value) @@ -96,8 +96,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, #ifdef AMD64 template<> template -inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, - atomic_memory_order /* order */) const { +inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); D old_value; @@ -110,9 +110,9 @@ inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(8 == sizeof(T)); __asm__ __volatile__ ("xchgq (%2),%0" : "=r" (exchange_value) @@ -123,10 +123,10 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(8 == sizeof(T)); __asm__ __volatile__ ( 
"lock cmpxchgq %1,(%3)" : "=a" (exchange_value) @@ -145,25 +145,25 @@ extern "C" { template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(8 == sizeof(T)); return cmpxchg_using_helper(_Atomic_cmpxchg_long, dest, compare_value, exchange_value); } // No direct support for 8-byte xchg; emulate using cmpxchg. template<> -struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {}; +struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {}; // No direct support for 8-byte add; emulate using cmpxchg. template<> -struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {}; +struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {}; template<> template -inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { +inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const { STATIC_ASSERT(8 == sizeof(T)); volatile int64_t dest; _Atomic_move_long(reinterpret_cast(src), reinterpret_cast(&dest)); @@ -172,8 +172,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { template<> template -inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, - T store_value) const { +inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest, + T store_value) const { STATIC_ASSERT(8 == sizeof(T)); _Atomic_move_long(reinterpret_cast(&store_value), reinterpret_cast(dest)); } @@ -181,7 +181,7 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, #endif // AMD64 template<> -struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { @@ -193,7 +193,7 @@ struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> }; template<> -struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { @@ -205,7 +205,7 @@ struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE> }; template<> -struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { @@ -218,7 +218,7 @@ struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE> #ifdef AMD64 template<> -struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { diff --git a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp index 37cd93e765d97..b5cedac867bc2 100644 --- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp +++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -32,7 +32,7 @@ // Implementation of class atomic template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -44,8 +44,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -56,8 +56,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); @@ -68,9 +68,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); FULL_MEM_BARRIER; T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED); @@ -80,9 +80,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); FULL_MEM_BARRIER; T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED); @@ -92,14 +92,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, // No direct support for cmpxchg of bytes; emulate using int. 
template<> -struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; +struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {}; template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); T value = compare_value; FULL_MEM_BARRIER; @@ -111,10 +111,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); T value = compare_value; @@ -134,7 +134,7 @@ inline void atomic_copy64(const volatile void *src, volatile void *dst) { template<> template -inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { +inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const { STATIC_ASSERT(8 == sizeof(T)); T dest; __atomic_load(const_cast(src), &dest, __ATOMIC_RELAXED); @@ -143,8 +143,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { template<> template -inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, - T store_value) const { +inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest, + T store_value) const { STATIC_ASSERT(8 == sizeof(T)); __atomic_store(dest, &store_value, __ATOMIC_RELAXED); } diff --git a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp index fa1ab9524425a..4940cbdc2468c 100644 --- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp +++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -70,7 +70,7 @@ inline D atomic_fastcall(F stub, volatile D *dest, T1 arg1, T2 arg2) { } template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -83,8 +83,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); aarch64_atomic_stub_t stub; @@ -99,8 +99,8 @@ inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); aarch64_atomic_stub_t stub; @@ -115,9 +115,9 @@ inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); T old_value = atomic_fastcall(aarch64_atomic_xchg_4_impl, dest, exchange_value); return old_value; @@ -125,8 +125,8 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); T old_value = atomic_fastcall(aarch64_atomic_xchg_8_impl, dest, exchange_value); return old_value; @@ -134,10 +134,10 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value, template<> template -inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(1 == sizeof(T)); aarch64_atomic_stub_t stub; switch (order) { @@ -152,10 +152,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); aarch64_atomic_stub_t stub; switch (order) { @@ -175,10 +175,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); aarch64_atomic_stub_t stub; switch (order) { @@ -197,21 +197,21 @@ inline T 
Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, } template -struct Atomic::PlatformOrderedLoad +struct AtomicAccess::PlatformOrderedLoad { template T operator()(const volatile T* p) const { T data; __atomic_load(const_cast(p), &data, __ATOMIC_ACQUIRE); return data; } }; template -struct Atomic::PlatformOrderedStore +struct AtomicAccess::PlatformOrderedStore { template void operator()(volatile T* p, T v) const { __atomic_store(const_cast(p), &v, __ATOMIC_RELEASE); } }; template -struct Atomic::PlatformOrderedStore +struct AtomicAccess::PlatformOrderedStore { template void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); } diff --git a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp index 4346920cd3765..db00c347dea82 100644 --- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp +++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,7 +78,7 @@ class ARMAtomicFuncs : AllStatic { template<> template -inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { +inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const { STATIC_ASSERT(8 == sizeof(T)); return PrimitiveConversions::cast( (*ARMAtomicFuncs::_load_long_func)(reinterpret_cast(src))); @@ -86,20 +86,20 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { template<> template -inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, - T store_value) const { +inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest, + T store_value) const { STATIC_ASSERT(8 == sizeof(T)); (*ARMAtomicFuncs::_store_long_func)( PrimitiveConversions::cast(store_value), reinterpret_cast(dest)); } -// As per atomic.hpp all read-modify-write operations have to provide two-way +// As per atomicAccess.hpp all read-modify-write operations have to provide two-way // barriers semantics. // // For ARMv7 we add explicit barriers in the stubs. template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -111,8 +111,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); return add_using_helper(ARMAtomicFuncs::_add_func, dest, add_value); @@ -121,26 +121,26 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); return xchg_using_helper(ARMAtomicFuncs::_xchg_func, dest, exchange_value); } // No direct support for 8-byte xchg; emulate using cmpxchg. 
template<> -struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {}; +struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {}; // No direct support for 8-byte add; emulate using cmpxchg. template<> -struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {}; +struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {}; // The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering // No direct support for cmpxchg of bytes; emulate using int. template<> -struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; +struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {}; inline int32_t reorder_cmpxchg_func(int32_t exchange_value, @@ -160,20 +160,20 @@ inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value, template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); return cmpxchg_using_helper(reorder_cmpxchg_func, dest, compare_value, exchange_value); } template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); return cmpxchg_using_helper(reorder_cmpxchg_long_func, dest, compare_value, exchange_value); } diff --git a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp index 1e4eb37cdac65..9f1d90c26bdc6 100644 --- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp +++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -93,7 +93,7 @@ inline void post_membar(atomic_memory_order order) { template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -105,8 +105,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -131,8 +131,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); @@ -156,9 +156,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { // Note that xchg doesn't necessarily do an acquire // (see synchronizer.cpp). @@ -195,9 +195,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); // Note that xchg doesn't necessarily do an acquire // (see synchronizer.cpp). @@ -235,15 +235,15 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(1 == sizeof(T)); // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not - // specified otherwise (see atomic.hpp). + // specified otherwise (see atomicAccess.hpp). // Using 32 bit internally. unsigned int old_value, loaded_value; @@ -282,15 +282,15 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not - // specified otherwise (see atomic.hpp). + // specified otherwise (see atomicAccess.hpp). 
T old_value; const uint64_t zero = 0; @@ -332,15 +332,15 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); // Note that cmpxchg guarantees a two-way memory barrier across // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not - // specified otherwise (see atomic.hpp). + // specified otherwise (see atomicAccess.hpp). T old_value; const uint64_t zero = 0; @@ -381,11 +381,11 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, } template -struct Atomic::PlatformOrderedLoad +struct AtomicAccess::PlatformOrderedLoad { template T operator()(const volatile T* p) const { - T t = Atomic::load(p); + T t = AtomicAccess::load(p); // Use twi-isync for load_acquire (faster than lwsync). __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (t) : "memory"); return t; diff --git a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp index bde8181ebf352..f713465edeb5f 100644 --- a/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp +++ b/src/hotspot/os_cpu/linux_riscv/atomic_linux_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -40,7 +40,7 @@ #endif template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const { @@ -71,10 +71,10 @@ struct Atomic::PlatformAdd { #ifndef FULL_COMPILER_ATOMIC_SUPPORT template<> template -inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((unused)), - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__((unused)), + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(1 == sizeof(T)); if (order != memory_order_relaxed) { @@ -122,10 +122,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__(( // See also JDK-8326936. template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)), - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)), + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); int32_t old_value; @@ -154,9 +154,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__(( template template -inline T Atomic::PlatformXchg::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { #ifndef FULL_COMPILER_ATOMIC_SUPPORT // If we add xchg for sub word and are using older compiler // it must be added here due to not using lib atomic. 
@@ -180,10 +180,10 @@ inline T Atomic::PlatformXchg::operator()(T volatile* dest, // __attribute__((unused)) on dest is to get rid of spurious GCC warnings. template template -inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest __attribute__((unused)), - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg::operator()(T volatile* dest __attribute__((unused)), + T compare_value, + T exchange_value, + atomic_memory_order order) const { #ifndef FULL_COMPILER_ATOMIC_SUPPORT STATIC_ASSERT(byte_size > 4); @@ -204,21 +204,21 @@ inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest __attri } template -struct Atomic::PlatformOrderedLoad +struct AtomicAccess::PlatformOrderedLoad { template T operator()(const volatile T* p) const { T data; __atomic_load(const_cast(p), &data, __ATOMIC_ACQUIRE); return data; } }; template -struct Atomic::PlatformOrderedStore +struct AtomicAccess::PlatformOrderedStore { template void operator()(volatile T* p, T v) const { __atomic_store(const_cast(p), &v, __ATOMIC_RELEASE); } }; template -struct Atomic::PlatformOrderedStore +struct AtomicAccess::PlatformOrderedStore { template void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); } diff --git a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp index 3daa9d84deaf1..ec620e3907ad5 100644 --- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp +++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -26,7 +26,7 @@ #ifndef OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP #define OS_CPU_LINUX_S390_ATOMIC_LINUX_S390_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "runtime/vm_version.hpp" @@ -55,7 +55,7 @@ // before the other store becomes visible. //------------ -// Atomic::add +// AtomicAccess::add //------------ // These methods force the value in memory to be augmented by the passed increment. // Both, memory value and increment, are treated as 32bit signed binary integers. 
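// A minimal sketch of the add family's contract for callers (hypothetical names,
// not part of this change): add/sub return the updated value, which makes
// "first in / last out" checks a single call. Relaxed ordering is shown because
// the counter is a pure statistic, as with the class counters later in this patch.
#include "runtime/atomicAccess.hpp"

static volatile int _live_workers = 0;               // hypothetical statistic

void on_worker_start() {
  AtomicAccess::inc(&_live_workers);
}

bool on_worker_exit_was_last() {
  // sub returns the new value, so reaching zero identifies the last leaver.
  return AtomicAccess::sub(&_live_workers, 1, memory_order_relaxed) == 0;
}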
@@ -75,7 +75,7 @@ inline void z196_fast_sync() { } template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -87,8 +87,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I inc, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I inc, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -141,8 +141,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I inc, template<> template -inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); @@ -194,7 +194,7 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc, //------------- -// Atomic::xchg +// AtomicAccess::xchg //------------- // These methods force the value in memory to be replaced by the new value passed // in as argument. @@ -211,9 +211,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc, // replacement succeeded. template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order unused) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order unused) const { STATIC_ASSERT(4 == sizeof(T)); T old; @@ -235,9 +235,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order unused) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order unused) const { STATIC_ASSERT(8 == sizeof(T)); T old; @@ -258,7 +258,7 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, } //---------------- -// Atomic::cmpxchg +// AtomicAccess::cmpxchg //---------------- // These methods compare the value in memory with a given compare value. // If both values compare equal, the value in memory is replaced with @@ -288,14 +288,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, // No direct support for cmpxchg of bytes; emulate using int. 
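// A minimal sketch of the idea behind the byte-sized fallback used below: CAS the
// aligned 32-bit word that contains the byte and leave its neighbours untouched.
// This is not HotSpot's CmpxchgByteUsingInt, just an illustration with GCC builtins;
// VM_LITTLE_ENDIAN is assumed to be the usual HotSpot endianness macro.
#include <stdint.h>

static uint8_t byte_cmpxchg_via_int(volatile uint8_t* dest,
                                    uint8_t compare_value,
                                    uint8_t exchange_value) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(dest);
  volatile uint32_t* aligned = reinterpret_cast<volatile uint32_t*>(addr & ~(uintptr_t)3);
#ifdef VM_LITTLE_ENDIAN
  const int shift = 8 * (int)(addr & 3);             // byte position within the word
#else
  const int shift = 8 * (3 - (int)(addr & 3));       // mirrored on big-endian targets such as s390
#endif
  const uint32_t mask = (uint32_t)0xff << shift;

  uint32_t old_word = __atomic_load_n(aligned, __ATOMIC_RELAXED);
  while (true) {
    uint8_t old_byte = (uint8_t)((old_word & mask) >> shift);
    if (old_byte != compare_value) {
      return old_byte;                               // report the conflicting value, as cmpxchg does
    }
    uint32_t new_word = (old_word & ~mask) | ((uint32_t)exchange_value << shift);
    if (__atomic_compare_exchange_n(aligned, &old_word, new_word,
                                    false /* strong */, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED)) {
      return compare_value;                          // success: the old byte matched compare_value
    }
    // A failed CAS refreshed old_word (perhaps a neighbouring byte changed); retry.
  }
}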
template<> -struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; +struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {}; template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T cmp_val, - T xchg_val, - atomic_memory_order unused) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T cmp_val, + T xchg_val, + atomic_memory_order unused) const { STATIC_ASSERT(4 == sizeof(T)); T old; @@ -316,10 +316,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T cmp_val, - T xchg_val, - atomic_memory_order unused) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T cmp_val, + T xchg_val, + atomic_memory_order unused) const { STATIC_ASSERT(8 == sizeof(T)); T old; @@ -339,7 +339,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, } template -struct Atomic::PlatformOrderedLoad +struct AtomicAccess::PlatformOrderedLoad { template T operator()(const volatile T* p) const { T t = *p; OrderAccess::acquire(); return t; } diff --git a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp index 0156546ba9b77..561224f56be82 100644 --- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp +++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ // Implementation of class atomic template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -40,8 +40,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); D old_value; @@ -54,9 +54,9 @@ inline D Atomic::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); __asm__ volatile ( "xchgl (%2),%0" : "=r" (exchange_value) @@ -67,10 +67,10 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformCmpxchg<1>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(1 == sizeof(T)); __asm__ volatile ("lock cmpxchgb %1,(%3)" : "=a" (exchange_value) @@ -81,10 +81,10 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - 
atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(4 == sizeof(T)); __asm__ volatile ("lock cmpxchgl %1,(%3)" : "=a" (exchange_value) @@ -97,8 +97,8 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); D old_value; @@ -111,8 +111,8 @@ inline D Atomic::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); __asm__ __volatile__ ("xchgq (%2),%0" : "=r" (exchange_value) @@ -123,10 +123,10 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, T exchange_value, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order /* order */) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order /* order */) const { STATIC_ASSERT(8 == sizeof(T)); __asm__ __volatile__ ("lock cmpxchgq %1,(%3)" : "=a" (exchange_value) @@ -145,25 +145,25 @@ extern "C" { template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); return cmpxchg_using_helper(_Atomic_cmpxchg_long, dest, compare_value, exchange_value); } // No direct support for 8-byte xchg; emulate using cmpxchg. template<> -struct Atomic::PlatformXchg<8> : Atomic::XchgUsingCmpxchg<8> {}; +struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {}; // No direct support for 8-byte add; emulate using cmpxchg. 
template<> -struct Atomic::PlatformAdd<8> : Atomic::AddUsingCmpxchg<8> {}; +struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {}; template<> template -inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { +inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const { STATIC_ASSERT(8 == sizeof(T)); volatile int64_t dest; _Atomic_move_long(reinterpret_cast(src), reinterpret_cast(&dest)); @@ -172,8 +172,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { template<> template -inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, - T store_value) const { +inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest, + T store_value) const { STATIC_ASSERT(8 == sizeof(T)); _Atomic_move_long(reinterpret_cast(&store_value), reinterpret_cast(dest)); } @@ -181,7 +181,7 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, #endif // AMD64 template<> -struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { @@ -193,7 +193,7 @@ struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> }; template<> -struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { @@ -205,7 +205,7 @@ struct Atomic::PlatformOrderedStore<2, RELEASE_X_FENCE> }; template<> -struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { @@ -218,7 +218,7 @@ struct Atomic::PlatformOrderedStore<4, RELEASE_X_FENCE> #ifdef AMD64 template<> -struct Atomic::PlatformOrderedStore<8, RELEASE_X_FENCE> +struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> { template void operator()(volatile T* p, T v) const { diff --git a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp index 6409942c07de9..05d567d3e28b1 100644 --- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp +++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -31,7 +31,7 @@ // Implementation of class atomic template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -43,8 +43,8 @@ struct Atomic::PlatformAdd { template<> template -inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(I)); STATIC_ASSERT(4 == sizeof(D)); @@ -55,8 +55,8 @@ inline D Atomic::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { +inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(I)); STATIC_ASSERT(8 == sizeof(D)); @@ -67,9 +67,9 @@ inline D Atomic::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value, template<> template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); FULL_MEM_BARRIER; T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED); @@ -79,9 +79,9 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); FULL_MEM_BARRIER; T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED); @@ -91,14 +91,14 @@ inline T Atomic::PlatformXchg<8>::operator()(T volatile* dest, // No direct support for cmpxchg of bytes; emulate using int. 
template<> -struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; +struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {}; template<> template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(4 == sizeof(T)); T value = compare_value; @@ -111,10 +111,10 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, template<> template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(8 == sizeof(T)); FULL_MEM_BARRIER; @@ -134,7 +134,7 @@ inline void atomic_copy64(const volatile void *src, volatile void *dst) { template<> template -inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { +inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const { STATIC_ASSERT(8 == sizeof(T)); T dest; __atomic_load(const_cast(src), &dest, __ATOMIC_RELAXED); @@ -143,8 +143,8 @@ inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const { template<> template -inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, - T store_value) const { +inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest, + T store_value) const { STATIC_ASSERT(8 == sizeof(T)); __atomic_store(dest, &store_value, __ATOMIC_RELAXED); } diff --git a/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp index 90fc8ecfba412..42c5b0e4a6c7c 100644 --- a/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp +++ b/src/hotspot/os_cpu/windows_aarch64/atomic_windows_aarch64.hpp @@ -1,4 +1,5 @@ /* + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Microsoft Corporation. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,14 +31,14 @@ #include "runtime/vm_version.hpp" -// As per atomic.hpp all read-modify-write operations have to provide two-way +// As per atomicAccess.hpp all read-modify-write operations have to provide two-way // barriers semantics. The memory_order parameter is ignored - we always provide // the strongest/most-conservative ordering // // For AARCH64 we add explicit barriers in the stubs. 
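// A minimal sketch of the "always strongest ordering" policy stated above, in the
// shape the zero port uses elsewhere in this patch: bracket a relaxed primitive with
// full fences so the read-modify-write gets two-way barrier semantics regardless of
// the order argument. __atomic_thread_fence stands in here for FULL_MEM_BARRIER;
// conservative_xchg is an illustration, not part of this change.
int conservative_xchg(volatile int* dest, int exchange_value) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);           // leading full fence
  int old = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
  __atomic_thread_fence(__ATOMIC_SEQ_CST);           // trailing full fence
  return old;
}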
template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -53,9 +54,9 @@ struct Atomic::PlatformAdd { #define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline D Atomic::PlatformAdd::add_then_fetch(D volatile* dest, \ - I add_value, \ - atomic_memory_order order) const { \ + inline D AtomicAccess::PlatformAdd::add_then_fetch(D volatile* dest, \ + I add_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ @@ -70,9 +71,9 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64) #define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline T Atomic::PlatformXchg::operator()(T volatile* dest, \ - T exchange_value, \ - atomic_memory_order order) const { \ + inline T AtomicAccess::PlatformXchg::operator()(T volatile* dest, \ + T exchange_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ @@ -85,16 +86,16 @@ DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64) #undef DEFINE_INTRINSIC_XCHG // Note: the order of the parameters is different between -// Atomic::PlatformCmpxchg<*>::operator() and the +// AtomicAccess::PlatformCmpxchg<*>::operator() and the // InterlockedCompareExchange* API. #define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest, \ - T compare_value, \ - T exchange_value, \ - atomic_memory_order order) const { \ + inline T AtomicAccess::PlatformCmpxchg::operator()(T volatile* dest, \ + T compare_value, \ + T exchange_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ diff --git a/src/hotspot/os_cpu/windows_aarch64/copy_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/copy_windows_aarch64.hpp index c70ea69725eb6..bebe7473bfd3f 100644 --- a/src/hotspot/os_cpu/windows_aarch64/copy_windows_aarch64.hpp +++ b/src/hotspot/os_cpu/windows_aarch64/copy_windows_aarch64.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2020, Microsoft Corporation. All rights reserved. - * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #ifndef OS_CPU_WINDOWS_AARCH64_COPY_WINDOWS_AARCH64_HPP #define OS_CPU_WINDOWS_AARCH64_COPY_WINDOWS_AARCH64_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include @@ -35,14 +35,14 @@ static void pd_conjoint_atomic_helper(const T* from, T* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards - Atomic::store(to++, Atomic::load(from++)); + AtomicAccess::store(to++, AtomicAccess::load(from++)); } } else { from += count - 1; to += count - 1; while (count-- > 0) { // Copy backwards - Atomic::store(to--, Atomic::load(from--)); + AtomicAccess::store(to--, AtomicAccess::load(from--)); } } } diff --git a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp index 9f7da19b328f4..4529da2909237 100644 --- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,15 +32,15 @@ // guaranteed to have acquire release semantics (w.r.t. compiler // reordering) and therefore does not even need a compiler barrier // for normal acquire release accesses. And all generalized -// bound calls like release_store go through Atomic::load -// and Atomic::store which do volatile memory accesses. +// bound calls like release_store go through AtomicAccess::load +// and AtomicAccess::store which do volatile memory accesses. template<> inline void ScopedFence::postfix() { } template<> inline void ScopedFence::prefix() { } template<> inline void ScopedFence::prefix() { } template<> inline void ScopedFence::postfix() { OrderAccess::fence(); } template -struct Atomic::PlatformAdd { +struct AtomicAccess::PlatformAdd { template D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; @@ -56,9 +56,9 @@ struct Atomic::PlatformAdd { #define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline D Atomic::PlatformAdd::add_then_fetch(D volatile* dest, \ - I add_value, \ - atomic_memory_order order) const { \ + inline D AtomicAccess::PlatformAdd::add_then_fetch(D volatile* dest, \ + I add_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ @@ -73,9 +73,9 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64) #define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline T Atomic::PlatformXchg::operator()(T volatile* dest, \ - T exchange_value, \ - atomic_memory_order order) const { \ + inline T AtomicAccess::PlatformXchg::operator()(T volatile* dest, \ + T exchange_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ @@ -88,16 +88,16 @@ DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64) #undef DEFINE_INTRINSIC_XCHG // Note: the order of the parameters is different between -// Atomic::PlatformCmpxchg<*>::operator() and the +// AtomicAccess::PlatformCmpxchg<*>::operator() and the // InterlockedCompareExchange* API. 
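// Concretely, the mismatch called out above: AtomicAccess::cmpxchg takes
// (dest, compare_value, exchange_value), while the Windows intrinsic takes
// (Destination, Exchange, Comparand) and returns the prior value, so the last two
// arguments swap places when forwarding. cas_via_interlocked is only an illustration.
#include <windows.h>

LONG cas_via_interlocked(volatile LONG* dest, LONG compare_value, LONG exchange_value) {
  return InterlockedCompareExchange(dest, exchange_value, compare_value);
}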
#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \ template<> \ template \ - inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest, \ - T compare_value, \ - T exchange_value, \ - atomic_memory_order order) const { \ + inline T AtomicAccess::PlatformCmpxchg::operator()(T volatile* dest, \ + T compare_value, \ + T exchange_value, \ + atomic_memory_order order) const { \ STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \ return PrimitiveConversions::cast( \ IntrinsicName(reinterpret_cast(dest), \ diff --git a/src/hotspot/os_cpu/windows_x86/copy_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/copy_windows_x86.hpp index 4298542629b22..8dfbf473562cf 100644 --- a/src/hotspot/os_cpu/windows_x86/copy_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/copy_windows_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,21 +25,21 @@ #ifndef OS_CPU_WINDOWS_X86_COPY_WINDOWS_X86_HPP #define OS_CPU_WINDOWS_X86_COPY_WINDOWS_X86_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" template static void pd_conjoint_atomic_helper(const T* from, T* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards - Atomic::store(to++, Atomic::load(from++)); + AtomicAccess::store(to++, AtomicAccess::load(from++)); } } else { from += count - 1; to += count - 1; while (count-- > 0) { // Copy backwards - Atomic::store(to--, Atomic::load(from--)); + AtomicAccess::store(to--, AtomicAccess::load(from--)); } } } diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp index fbf2bfb0f5392..637c7c46ef4e6 100644 --- a/src/hotspot/share/c1/c1_Runtime1.cpp +++ b/src/hotspot/share/c1/c1_Runtime1.cpp @@ -54,7 +54,7 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" diff --git a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp index 11848b8830003..6e5816cd589de 100644 --- a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp +++ b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp @@ -57,7 +57,7 @@ bool AOTLinkedClassBulkLoader::class_preloading_finished() { // The ConstantPools of preloaded classes have references to other preloaded classes. We don't // want any Java code (including JVMCI compiler) to use these classes until all of them // are loaded. 
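// A minimal sketch of the completion gate described above (hypothetical names): the
// loader thread publishes with a release store only after every class is wired up,
// and readers poll with an acquire load, so a true answer implies the fully built
// state is visible to them.
#include "runtime/atomicAccess.hpp"

static volatile bool _everything_loaded = false;     // hypothetical gate

void publish_completion() {
  // ... all preloaded classes linked and registered before this point ...
  AtomicAccess::release_store(&_everything_loaded, true);
}

bool preloading_finished() {
  return AtomicAccess::load_acquire(&_everything_loaded);
}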
- return Atomic::load_acquire(&_all_completed); + return AtomicAccess::load_acquire(&_all_completed); } } @@ -90,7 +90,7 @@ void AOTLinkedClassBulkLoader::load_non_javabase_classes(JavaThread* current) { } _app_completed = true; - Atomic::release_store(&_all_completed, true); + AtomicAccess::release_store(&_all_completed, true); } void AOTLinkedClassBulkLoader::load_classes_in_loader(JavaThread* current, AOTLinkedClassCategory class_category, oop class_loader_oop) { diff --git a/src/hotspot/share/cds/archiveUtils.cpp b/src/hotspot/share/cds/archiveUtils.cpp index 82d4379f6317b..43da3cf9da84c 100644 --- a/src/hotspot/share/cds/archiveUtils.cpp +++ b/src/hotspot/share/cds/archiveUtils.cpp @@ -418,7 +418,7 @@ ArchiveWorkers::ArchiveWorkers() : _task(nullptr) {} ArchiveWorkers::~ArchiveWorkers() { - assert(Atomic::load(&_state) != WORKING, "Should not be working"); + assert(AtomicAccess::load(&_state) != WORKING, "Should not be working"); } int ArchiveWorkers::max_workers() { @@ -435,11 +435,11 @@ bool ArchiveWorkers::is_parallel() { void ArchiveWorkers::start_worker_if_needed() { while (true) { - int cur = Atomic::load(&_started_workers); + int cur = AtomicAccess::load(&_started_workers); if (cur >= _num_workers) { return; } - if (Atomic::cmpxchg(&_started_workers, cur, cur + 1, memory_order_relaxed) == cur) { + if (AtomicAccess::cmpxchg(&_started_workers, cur, cur + 1, memory_order_relaxed) == cur) { new ArchiveWorkerThread(this); return; } @@ -447,9 +447,9 @@ void ArchiveWorkers::start_worker_if_needed() { } void ArchiveWorkers::run_task(ArchiveWorkerTask* task) { - assert(Atomic::load(&_state) == UNUSED, "Should be unused yet"); - assert(Atomic::load(&_task) == nullptr, "Should not have running tasks"); - Atomic::store(&_state, WORKING); + assert(AtomicAccess::load(&_state) == UNUSED, "Should be unused yet"); + assert(AtomicAccess::load(&_task) == nullptr, "Should not have running tasks"); + AtomicAccess::store(&_state, WORKING); if (is_parallel()) { run_task_multi(task); @@ -457,8 +457,8 @@ void ArchiveWorkers::run_task(ArchiveWorkerTask* task) { run_task_single(task); } - assert(Atomic::load(&_state) == WORKING, "Should be working"); - Atomic::store(&_state, SHUTDOWN); + assert(AtomicAccess::load(&_state) == WORKING, "Should be working"); + AtomicAccess::store(&_state, SHUTDOWN); } void ArchiveWorkers::run_task_single(ArchiveWorkerTask* task) { @@ -475,8 +475,8 @@ void ArchiveWorkers::run_task_multi(ArchiveWorkerTask* task) { // Set up the run and publish the task. Issue one additional finish token // to cover the semaphore shutdown path, see below. - Atomic::store(&_finish_tokens, _num_workers + 1); - Atomic::release_store(&_task, task); + AtomicAccess::store(&_finish_tokens, _num_workers + 1); + AtomicAccess::release_store(&_task, task); // Kick off pool startup by starting a single worker, and proceed // immediately to executing the task locally. @@ -494,19 +494,19 @@ void ArchiveWorkers::run_task_multi(ArchiveWorkerTask* task) { // on semaphore first, and then spin-wait for all workers to terminate. 
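// A minimal sketch of the shutdown handshake described above (hypothetical names,
// SpinYield omitted): N workers plus one extra token; the last worker signals the
// semaphore and then consumes the extra token, so a zero count means no worker can
// still be touching the pool.
#include "runtime/atomicAccess.hpp"
#include "runtime/semaphore.hpp"

static volatile int _tokens = 0;                     // hypothetical token counter
static Semaphore    _all_done;

void coordinator_prepare(int num_workers) {
  AtomicAccess::store(&_tokens, num_workers + 1);    // the +1 covers the semaphore path
}

void worker_leave() {
  if (AtomicAccess::sub(&_tokens, 1, memory_order_relaxed) == 1) {
    _all_done.signal();                              // only the extra token remains
    AtomicAccess::sub(&_tokens, 1, memory_order_relaxed);
  }
}

void coordinator_wait() {
  _all_done.wait();
  while (AtomicAccess::load(&_tokens) != 0) { }      // brief spin for the last leaver
}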
_end_semaphore.wait(); SpinYield spin; - while (Atomic::load(&_finish_tokens) != 0) { + while (AtomicAccess::load(&_finish_tokens) != 0) { spin.wait(); } OrderAccess::fence(); - assert(Atomic::load(&_finish_tokens) == 0, "All tokens are consumed"); + assert(AtomicAccess::load(&_finish_tokens) == 0, "All tokens are consumed"); } void ArchiveWorkers::run_as_worker() { assert(is_parallel(), "Should be in parallel mode"); - ArchiveWorkerTask* task = Atomic::load_acquire(&_task); + ArchiveWorkerTask* task = AtomicAccess::load_acquire(&_task); task->run(); // All work done in threads should be visible to caller. @@ -514,22 +514,22 @@ void ArchiveWorkers::run_as_worker() { // Signal the pool the work is complete, and we are exiting. // Worker cannot do anything else with the pool after this. - if (Atomic::sub(&_finish_tokens, 1, memory_order_relaxed) == 1) { + if (AtomicAccess::sub(&_finish_tokens, 1, memory_order_relaxed) == 1) { // Last worker leaving. Notify the pool it can unblock to spin-wait. // Then consume the last token and leave. _end_semaphore.signal(); - int last = Atomic::sub(&_finish_tokens, 1, memory_order_relaxed); + int last = AtomicAccess::sub(&_finish_tokens, 1, memory_order_relaxed); assert(last == 0, "Should be"); } } void ArchiveWorkerTask::run() { while (true) { - int chunk = Atomic::load(&_chunk); + int chunk = AtomicAccess::load(&_chunk); if (chunk >= _max_chunks) { return; } - if (Atomic::cmpxchg(&_chunk, chunk, chunk + 1, memory_order_relaxed) == chunk) { + if (AtomicAccess::cmpxchg(&_chunk, chunk, chunk + 1, memory_order_relaxed) == chunk) { assert(0 <= chunk && chunk < _max_chunks, "Sanity"); work(chunk, _max_chunks); } diff --git a/src/hotspot/share/cds/classListParser.cpp b/src/hotspot/share/cds/classListParser.cpp index 2405d0dc2ff15..111a8ab8f66cf 100644 --- a/src/hotspot/share/cds/classListParser.cpp +++ b/src/hotspot/share/cds/classListParser.cpp @@ -47,7 +47,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "oops/constantPool.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" @@ -87,7 +87,7 @@ ClassListParser::ClassListParser(const char* file, ParseMode parse_mode) : // _instance should only be accessed by the thread that created _instance. assert(_instance == nullptr, "must be singleton"); _instance = this; - Atomic::store(&_parsing_thread, Thread::current()); + AtomicAccess::store(&_parsing_thread, Thread::current()); } FILE* ClassListParser::do_open(const char* file) { @@ -104,11 +104,11 @@ FILE* ClassListParser::do_open(const char* file) { } bool ClassListParser::is_parsing_thread() { - return Atomic::load(&_parsing_thread) == Thread::current(); + return AtomicAccess::load(&_parsing_thread) == Thread::current(); } ClassListParser::~ClassListParser() { - Atomic::store(&_parsing_thread, (Thread*)nullptr); + AtomicAccess::store(&_parsing_thread, (Thread*)nullptr); delete _indy_items; delete _interfaces; _instance = nullptr; diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp index 617110203cdaf..4088f6cb643ad 100644 --- a/src/hotspot/share/classfile/classFileParser.cpp +++ b/src/hotspot/share/classfile/classFileParser.cpp @@ -5728,8 +5728,8 @@ void ClassFileParser::mangle_hidden_class_name(InstanceKlass* const ik) { // occupied by the archive at run time, so we know that no dynamically // loaded InstanceKlass will be placed under there. 
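// A minimal sketch of the id scheme described above (hypothetical names): a cmpxchg
// from zero seeds the counter with the base value exactly once, and every caller then
// draws a distinct id with an atomic add, so generated ids stay above the base and
// never collide with one another.
#include "runtime/atomicAccess.hpp"

static volatile size_t _next_id = 0;                 // hypothetical id counter

size_t next_unique_id(size_t base_value) {
  AtomicAccess::cmpxchg(&_next_id, (size_t)0, base_value);  // only the first caller seeds it
  return AtomicAccess::add(&_next_id, (size_t)1);           // add returns the incremented value
}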
static volatile size_t counter = 0; - Atomic::cmpxchg(&counter, (size_t)0, Arguments::default_SharedBaseAddress()); // initialize it - size_t new_id = Atomic::add(&counter, (size_t)1); + AtomicAccess::cmpxchg(&counter, (size_t)0, Arguments::default_SharedBaseAddress()); // initialize it + size_t new_id = AtomicAccess::add(&counter, (size_t)1); jio_snprintf(addr_buf, 20, "0x%zx", new_id); } else { jio_snprintf(addr_buf, 20, INTPTR_FORMAT, p2i(ik)); diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp index e515966270846..1f2eb6d25ccc3 100644 --- a/src/hotspot/share/classfile/classLoader.cpp +++ b/src/hotspot/share/classfile/classLoader.cpp @@ -750,7 +750,7 @@ void ClassLoader::add_to_boot_append_entries(ClassPathEntry *new_entry) { if (_last_append_entry == nullptr) { _last_append_entry = new_entry; assert(first_append_entry() == nullptr, "boot loader's append class path entry list not empty"); - Atomic::release_store(&_first_append_entry_list, new_entry); + AtomicAccess::release_store(&_first_append_entry_list, new_entry); } else { _last_append_entry->set_next(new_entry); _last_append_entry = new_entry; diff --git a/src/hotspot/share/classfile/classLoader.hpp b/src/hotspot/share/classfile/classLoader.hpp index a946f1f4e25aa..afb0a581dcc38 100644 --- a/src/hotspot/share/classfile/classLoader.hpp +++ b/src/hotspot/share/classfile/classLoader.hpp @@ -212,7 +212,7 @@ class ClassLoader: AllStatic { // Note: boot loader append path does not support named modules. static ClassPathEntry* volatile _first_append_entry_list; static ClassPathEntry* first_append_entry() { - return Atomic::load_acquire(&_first_append_entry_list); + return AtomicAccess::load_acquire(&_first_append_entry_list); } // Last entry in linked list of appended ClassPathEntry instances diff --git a/src/hotspot/share/classfile/classLoader.inline.hpp b/src/hotspot/share/classfile/classLoader.inline.hpp index ec3993b089ea7..fd711bf62ceaa 100644 --- a/src/hotspot/share/classfile/classLoader.inline.hpp +++ b/src/hotspot/share/classfile/classLoader.inline.hpp @@ -27,14 +27,14 @@ #include "classfile/classLoader.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" // Next entry in class path -inline ClassPathEntry* ClassPathEntry::next() const { return Atomic::load_acquire(&_next); } +inline ClassPathEntry* ClassPathEntry::next() const { return AtomicAccess::load_acquire(&_next); } inline void ClassPathEntry::set_next(ClassPathEntry* next) { // may have unlocked readers, so ensure visibility. 
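// A minimal sketch of the rule the comment above encodes, on a hypothetical
// singly-linked node (Node is an illustration, not part of this change): links are
// written with release stores and read with acquire loads, so lock-free readers that
// follow a link always see the pointed-to node fully constructed.
#include "runtime/atomicAccess.hpp"

class Node {
  Node* volatile _next;
  int            _payload;
 public:
  Node(int payload) : _next(nullptr), _payload(payload) {}
  Node* next() const      { return AtomicAccess::load_acquire(&_next); }
  void  set_next(Node* n) { AtomicAccess::release_store(&_next, n); }
  int   payload() const   { return _payload; }
};

int sum_list(Node* head) {                           // safe against concurrent appends
  int sum = 0;
  for (Node* n = head; n != nullptr; n = n->next()) {
    sum += n->payload();
  }
  return sum;
}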
- Atomic::release_store(&_next, next); + AtomicAccess::release_store(&_next, next); } inline ClassPathEntry* ClassLoader::classpath_entry(int n) { diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp index deb2035eef2a8..dfc3b74db9636 100644 --- a/src/hotspot/share/classfile/classLoaderData.cpp +++ b/src/hotspot/share/classfile/classLoaderData.cpp @@ -72,7 +72,7 @@ #include "oops/verifyOopClosure.hpp" #include "oops/weakHandle.inline.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutex.hpp" #include "runtime/safepoint.hpp" @@ -192,19 +192,19 @@ ClassLoaderData::ChunkedHandleList::~ChunkedHandleList() { OopHandle ClassLoaderData::ChunkedHandleList::add(oop o) { if (_head == nullptr || _head->_size == Chunk::CAPACITY) { Chunk* next = new Chunk(_head); - Atomic::release_store(&_head, next); + AtomicAccess::release_store(&_head, next); } oop* handle = &_head->_data[_head->_size]; NativeAccess::oop_store(handle, o); - Atomic::release_store(&_head->_size, _head->_size + 1); + AtomicAccess::release_store(&_head->_size, _head->_size + 1); return OopHandle(handle); } int ClassLoaderData::ChunkedHandleList::count() const { int count = 0; - Chunk* chunk = Atomic::load_acquire(&_head); + Chunk* chunk = AtomicAccess::load_acquire(&_head); while (chunk != nullptr) { - count += Atomic::load(&chunk->_size); + count += AtomicAccess::load(&chunk->_size); chunk = chunk->_next; } return count; @@ -217,10 +217,10 @@ inline void ClassLoaderData::ChunkedHandleList::oops_do_chunk(OopClosure* f, Chu } void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) { - Chunk* head = Atomic::load_acquire(&_head); + Chunk* head = AtomicAccess::load_acquire(&_head); if (head != nullptr) { // Must be careful when reading size of head - oops_do_chunk(f, head, Atomic::load_acquire(&head->_size)); + oops_do_chunk(f, head, AtomicAccess::load_acquire(&head->_size)); for (Chunk* c = head->_next; c != nullptr; c = c->_next) { oops_do_chunk(f, c, c->_size); } @@ -258,9 +258,9 @@ bool ClassLoaderData::ChunkedHandleList::contains(oop p) { #ifndef PRODUCT bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) { - Chunk* chunk = Atomic::load_acquire(&_head); + Chunk* chunk = AtomicAccess::load_acquire(&_head); while (chunk != nullptr) { - if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[Atomic::load(&chunk->_size)])) { + if (&(chunk->_data[0]) <= oop_handle && oop_handle < &(chunk->_data[AtomicAccess::load(&chunk->_size)])) { return true; } chunk = chunk->_next; @@ -271,12 +271,12 @@ bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) { void ClassLoaderData::clear_claim(int claim) { for (;;) { - int old_claim = Atomic::load(&_claim); + int old_claim = AtomicAccess::load(&_claim); if ((old_claim & claim) == 0) { return; } int new_claim = old_claim & ~claim; - if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) { + if (AtomicAccess::cmpxchg(&_claim, old_claim, new_claim) == old_claim) { return; } } @@ -290,12 +290,12 @@ void ClassLoaderData::verify_not_claimed(int claim) { bool ClassLoaderData::try_claim(int claim) { for (;;) { - int old_claim = Atomic::load(&_claim); + int old_claim = AtomicAccess::load(&_claim); if ((old_claim & claim) == claim) { return false; } int new_claim = old_claim | claim; - if (Atomic::cmpxchg(&_claim, old_claim, new_claim) == old_claim) { + if (AtomicAccess::cmpxchg(&_claim, 
old_claim, new_claim) == old_claim) { return true; } } @@ -383,7 +383,7 @@ void ClassLoaderData::oops_do(OopClosure* f, int claim_value, bool clear_mod_oop void ClassLoaderData::classes_do(KlassClosure* klass_closure) { // Lock-free access requires load_acquire - for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { + for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { klass_closure->do_klass(k); assert(k != k->next_link(), "no loops!"); } @@ -391,7 +391,7 @@ void ClassLoaderData::classes_do(KlassClosure* klass_closure) { void ClassLoaderData::classes_do(void f(Klass * const)) { // Lock-free access requires load_acquire - for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { + for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { f(k); assert(k != k->next_link(), "no loops!"); } @@ -399,7 +399,7 @@ void ClassLoaderData::classes_do(void f(Klass * const)) { void ClassLoaderData::methods_do(void f(Method*)) { // Lock-free access requires load_acquire - for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { + for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) { InstanceKlass::cast(k)->methods_do(f); } @@ -408,7 +408,7 @@ void ClassLoaderData::methods_do(void f(Method*)) { void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) { // Lock-free access requires load_acquire - for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { + for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { // Filter out InstanceKlasses (or their ObjArrayKlasses) that have not entered the // loaded state. if (k->is_instance_klass()) { @@ -436,7 +436,7 @@ void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) { void ClassLoaderData::classes_do(void f(InstanceKlass*)) { // Lock-free access requires load_acquire - for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { + for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { if (k->is_instance_klass()) { f(InstanceKlass::cast(k)); } @@ -498,7 +498,7 @@ void ClassLoaderData::record_dependency(const Klass* k) { // It's a dependency we won't find through GC, add it. if (!_handles.contains(to)) { - NOT_PRODUCT(Atomic::inc(&_dependency_count)); + NOT_PRODUCT(AtomicAccess::inc(&_dependency_count)); LogTarget(Trace, class, loader, data) lt; if (lt.is_enabled()) { ResourceMark rm; @@ -523,7 +523,7 @@ void ClassLoaderData::add_class(Klass* k, bool publicize /* true */) { k->set_next_link(old_value); // Link the new item into the list, making sure the linked class is stable // since the list can be walked without a lock - Atomic::release_store(&_klasses, k); + AtomicAccess::release_store(&_klasses, k); if (k->is_array_klass()) { ClassLoaderDataGraph::inc_array_classes(1); } else { @@ -635,7 +635,7 @@ void ClassLoaderData::unload() { ModuleEntryTable* ClassLoaderData::modules() { // Lazily create the module entry table at first request. // Lock-free access requires load_acquire. - ModuleEntryTable* modules = Atomic::load_acquire(&_modules); + ModuleEntryTable* modules = AtomicAccess::load_acquire(&_modules); if (modules == nullptr) { MutexLocker m1(Module_lock); // Check if _modules got allocated while we were waiting for this lock. 
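// A minimal sketch of the lazy-creation pattern above as double-checked locking
// (Table and get_or_create_table are hypothetical): an acquire load on the fast path,
// a re-check under the lock, and a release store to publish the fully constructed
// object so lock-free readers never see it half-built.
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"

struct Table { int entries; };                       // hypothetical lazily created payload

static Table* volatile _table = nullptr;

Table* get_or_create_table(Mutex* lock) {
  Table* t = AtomicAccess::load_acquire(&_table);
  if (t == nullptr) {
    MutexLocker ml(lock);
    t = AtomicAccess::load(&_table);                 // re-check: another thread may have won
    if (t == nullptr) {
      t = new Table();
      t->entries = 0;                                // fully initialize before publishing
      AtomicAccess::release_store(&_table, t);
    }
  }
  return t;
}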
@@ -645,7 +645,7 @@ ModuleEntryTable* ClassLoaderData::modules() { { MutexLocker m1(metaspace_lock(), Mutex::_no_safepoint_check_flag); // Ensure _modules is stable, since it is examined without a lock - Atomic::release_store(&_modules, modules); + AtomicAccess::release_store(&_modules, modules); } } } @@ -819,7 +819,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() { // The reason for the delayed allocation is because some class loaders are // simply for delegating with no metadata of their own. // Lock-free access requires load_acquire. - ClassLoaderMetaspace* metaspace = Atomic::load_acquire(&_metaspace); + ClassLoaderMetaspace* metaspace = AtomicAccess::load_acquire(&_metaspace); if (metaspace == nullptr) { MutexLocker ml(_metaspace_lock, Mutex::_no_safepoint_check_flag); // Check if _metaspace got allocated while we were waiting for this lock. @@ -833,7 +833,7 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() { metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::StandardMetaspaceType); } // Ensure _metaspace is stable, since it is examined without a lock - Atomic::release_store(&_metaspace, metaspace); + AtomicAccess::release_store(&_metaspace, metaspace); } } return metaspace; @@ -1120,7 +1120,7 @@ void ClassLoaderData::verify() { bool ClassLoaderData::contains_klass(Klass* klass) { // Lock-free access requires load_acquire - for (Klass* k = Atomic::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { + for (Klass* k = AtomicAccess::load_acquire(&_klasses); k != nullptr; k = k->next_link()) { if (k == klass) return true; } return false; diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp index 63004d1458fe8..da49a9326e355 100644 --- a/src/hotspot/share/classfile/classLoaderData.hpp +++ b/src/hotspot/share/classfile/classLoaderData.hpp @@ -28,7 +28,7 @@ #include "memory/allocation.hpp" #include "oops/oopHandle.hpp" #include "oops/weakHandle.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutex.hpp" #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" diff --git a/src/hotspot/share/classfile/classLoaderData.inline.hpp b/src/hotspot/share/classfile/classLoaderData.inline.hpp index 7176e7c263d63..4c4427b19e17a 100644 --- a/src/hotspot/share/classfile/classLoaderData.inline.hpp +++ b/src/hotspot/share/classfile/classLoaderData.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -35,16 +35,16 @@ inline void ClassLoaderData::set_next(ClassLoaderData* next) { assert(this->next() == nullptr, "only link once"); - Atomic::store(&_next, next); + AtomicAccess::store(&_next, next); } inline ClassLoaderData* ClassLoaderData::next() const { - return Atomic::load(&_next); + return AtomicAccess::load(&_next); } inline void ClassLoaderData::unlink_next() { assert(next()->is_unloading(), "only remove unloading clds"); - Atomic::store(&_next, _next->_next); + AtomicAccess::store(&_next, _next->_next); } inline void ClassLoaderData::set_unloading_next(ClassLoaderData* unloading_next) { diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp index fca6a9e74ad31..4d3d6a951c51b 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp @@ -36,7 +36,7 @@ #include "memory/allocation.inline.hpp" #include "memory/metaspace.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutex.hpp" #include "runtime/safepoint.hpp" @@ -61,20 +61,20 @@ void ClassLoaderDataGraph::clear_claimed_marks() { // // Any ClassLoaderData added after or during walking the list are prepended to // _head. Their claim mark need not be handled here. - for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) { + for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) { cld->clear_claim(); } } void ClassLoaderDataGraph::clear_claimed_marks(int claim) { - for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) { + for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) { cld->clear_claim(claim); } } void ClassLoaderDataGraph::verify_claimed_marks_cleared(int claim) { #ifdef ASSERT - for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) { + for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) { cld->verify_not_claimed(claim); } #endif @@ -155,7 +155,7 @@ ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool has_clas // First install the new CLD to the Graph. cld->set_next(_head); - Atomic::release_store(&_head, cld); + AtomicAccess::release_store(&_head, cld); // Next associate with the class_loader. if (!has_class_mirror_holder) { @@ -192,14 +192,14 @@ inline void assert_is_safepoint_or_gc() { // These are functions called by the GC, which require all of the CLDs, including not yet unlinked CLDs. void ClassLoaderDataGraph::cld_do(CLDClosure* cl) { assert_is_safepoint_or_gc(); - for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) { + for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) { cl->do_cld(cld); } } void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) { assert_is_safepoint_or_gc(); - for (ClassLoaderData* cld = Atomic::load_acquire(&_head); cld != nullptr; cld = cld->next()) { + for (ClassLoaderData* cld = AtomicAccess::load_acquire(&_head); cld != nullptr; cld = cld->next()) { CLDClosure* closure = (cld->keep_alive_ref_count() > 0) ? 
strong : weak; if (closure != nullptr) { closure->do_cld(cld); @@ -428,7 +428,7 @@ bool ClassLoaderDataGraph::do_unloading() { } else { assert(data == _head, "sanity check"); // The GC might be walking this concurrently - Atomic::store(&_head, data->next()); + AtomicAccess::store(&_head, data->next()); } } } @@ -533,7 +533,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() { while (head != nullptr) { Klass* next = next_klass_in_cldg(head); - Klass* old_head = Atomic::cmpxchg(&_next_klass, head, next); + Klass* old_head = AtomicAccess::cmpxchg(&_next_klass, head, next); if (old_head == head) { return head; // Won the CAS. diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp index 6880194009c42..767db20a8f019 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "classfile/javaClasses.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader) { @@ -44,28 +44,28 @@ inline ClassLoaderData *ClassLoaderDataGraph::find_or_create(Handle loader) { } size_t ClassLoaderDataGraph::num_instance_classes() { - return Atomic::load(&_num_instance_classes); + return AtomicAccess::load(&_num_instance_classes); } size_t ClassLoaderDataGraph::num_array_classes() { - return Atomic::load(&_num_array_classes); + return AtomicAccess::load(&_num_array_classes); } void ClassLoaderDataGraph::inc_instance_classes(size_t count) { - Atomic::add(&_num_instance_classes, count, memory_order_relaxed); + AtomicAccess::add(&_num_instance_classes, count, memory_order_relaxed); } void ClassLoaderDataGraph::dec_instance_classes(size_t count) { - size_t old_count = Atomic::fetch_then_add(&_num_instance_classes, -count, memory_order_relaxed); + size_t old_count = AtomicAccess::fetch_then_add(&_num_instance_classes, -count, memory_order_relaxed); assert(old_count >= count, "Sanity"); } void ClassLoaderDataGraph::inc_array_classes(size_t count) { - Atomic::add(&_num_array_classes, count, memory_order_relaxed); + AtomicAccess::add(&_num_array_classes, count, memory_order_relaxed); } void ClassLoaderDataGraph::dec_array_classes(size_t count) { - size_t old_count = Atomic::fetch_then_add(&_num_array_classes, -count, memory_order_relaxed); + size_t old_count = AtomicAccess::fetch_then_add(&_num_array_classes, -count, memory_order_relaxed); assert(old_count >= count, "Sanity"); } diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index 3617f318b83f8..86a3b22fbbbdc 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -204,11 +204,11 @@ bool java_lang_String::_initialized; bool java_lang_String::test_and_set_flag(oop java_string, uint8_t flag_mask) { uint8_t* addr = flags_addr(java_string); - uint8_t value = Atomic::load(addr); + uint8_t value = AtomicAccess::load(addr); while ((value & flag_mask) == 0) { uint8_t old_value = value; value |= flag_mask; - value = 
Atomic::cmpxchg(addr, old_value, value); + value = AtomicAccess::cmpxchg(addr, old_value, value); if (value == old_value) return false; // Flag bit changed from 0 to 1. } return true; // Flag bit is already 1. @@ -2140,7 +2140,7 @@ void java_lang_VirtualThread::set_state(oop vthread, int state) { int java_lang_VirtualThread::cmpxchg_state(oop vthread, int old_state, int new_state) { jint* addr = vthread->field_addr(_state_offset); - int res = Atomic::cmpxchg(addr, old_state, new_state); + int res = AtomicAccess::cmpxchg(addr, old_state, new_state); return res; } @@ -2158,9 +2158,9 @@ void java_lang_VirtualThread::set_next(oop vthread, oop next_vthread) { // Method returns true if we added vthread to the list, false otherwise. bool java_lang_VirtualThread::set_onWaitingList(oop vthread, OopHandle& list_head) { jboolean* addr = vthread->field_addr(_onWaitingList_offset); - jboolean vthread_on_list = Atomic::load(addr); + jboolean vthread_on_list = AtomicAccess::load(addr); if (!vthread_on_list) { - vthread_on_list = Atomic::cmpxchg(addr, (jboolean)JNI_FALSE, (jboolean)JNI_TRUE); + vthread_on_list = AtomicAccess::cmpxchg(addr, (jboolean)JNI_FALSE, (jboolean)JNI_TRUE); if (!vthread_on_list) { for (;;) { oop head = list_head.resolve(); @@ -4760,7 +4760,7 @@ int java_lang_ClassLoader::_parent_offset; ClassLoaderData* java_lang_ClassLoader::loader_data_acquire(oop loader) { assert(loader != nullptr, "loader must not be null"); assert(oopDesc::is_oop(loader), "loader must be oop"); - return Atomic::load_acquire(loader->field_addr(_loader_data_offset)); + return AtomicAccess::load_acquire(loader->field_addr(_loader_data_offset)); } ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) { @@ -4772,7 +4772,7 @@ ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) { void java_lang_ClassLoader::release_set_loader_data(oop loader, ClassLoaderData* new_data) { assert(loader != nullptr, "loader must not be null"); assert(oopDesc::is_oop(loader), "loader must be oop"); - Atomic::release_store(loader->field_addr(_loader_data_offset), new_data); + AtomicAccess::release_store(loader->field_addr(_loader_data_offset), new_data); } #define CLASSLOADER_FIELDS_DO(macro) \ diff --git a/src/hotspot/share/classfile/javaClasses.inline.hpp b/src/hotspot/share/classfile/javaClasses.inline.hpp index 66ecca4bbeaa4..3cbbd2c12f2ef 100644 --- a/src/hotspot/share/classfile/javaClasses.inline.hpp +++ b/src/hotspot/share/classfile/javaClasses.inline.hpp @@ -80,7 +80,7 @@ uint8_t* java_lang_String::flags_addr(oop java_string) { } bool java_lang_String::is_flag_set(oop java_string, uint8_t flag_mask) { - return (Atomic::load(flags_addr(java_string)) & flag_mask) != 0; + return (AtomicAccess::load(flags_addr(java_string)) & flag_mask) != 0; } bool java_lang_String::deduplication_forbidden(oop java_string) { diff --git a/src/hotspot/share/classfile/packageEntry.hpp b/src/hotspot/share/classfile/packageEntry.hpp index 84620785ebb05..6abf89dc60f27 100644 --- a/src/hotspot/share/classfile/packageEntry.hpp +++ b/src/hotspot/share/classfile/packageEntry.hpp @@ -28,7 +28,7 @@ #include "classfile/moduleEntry.hpp" #include "oops/symbol.hpp" #include "oops/symbolHandle.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/growableArray.hpp" #include "utilities/hashTable.hpp" #include "utilities/macros.hpp" @@ -222,11 +222,11 @@ class PackageEntry : public CHeapObj { bool is_defined_by_cds_in_class_path(int idx) const { assert(idx < max_index_for_defined_in_class_path(), "sanity"); - 
return((Atomic::load(&_defined_by_cds_in_class_path) & ((int)1 << idx)) != 0); + return((AtomicAccess::load(&_defined_by_cds_in_class_path) & ((int)1 << idx)) != 0); } void set_defined_by_cds_in_class_path(int idx) { assert(idx < max_index_for_defined_in_class_path(), "sanity"); - Atomic::fetch_then_or(&_defined_by_cds_in_class_path, ((int)1 << idx)); + AtomicAccess::fetch_then_or(&_defined_by_cds_in_class_path, ((int)1 << idx)); } }; diff --git a/src/hotspot/share/classfile/stringTable.cpp b/src/hotspot/share/classfile/stringTable.cpp index 6f6409ee27a49..c6c1c7a31bdca 100644 --- a/src/hotspot/share/classfile/stringTable.cpp +++ b/src/hotspot/share/classfile/stringTable.cpp @@ -47,7 +47,7 @@ #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" #include "oops/weakHandle.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -325,11 +325,11 @@ void StringTable::create_table() { } void StringTable::item_added() { - Atomic::inc(&_items_count); + AtomicAccess::inc(&_items_count); } void StringTable::item_removed() { - Atomic::dec(&_items_count); + AtomicAccess::dec(&_items_count); } double StringTable::get_load_factor() { @@ -345,18 +345,18 @@ size_t StringTable::table_size() { } bool StringTable::has_work() { - return Atomic::load_acquire(&_has_work); + return AtomicAccess::load_acquire(&_has_work); } size_t StringTable::items_count_acquire() { - return Atomic::load_acquire(&_items_count); + return AtomicAccess::load_acquire(&_items_count); } void StringTable::trigger_concurrent_work() { // Avoid churn on ServiceThread if (!has_work()) { MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); - Atomic::store(&_has_work, true); + AtomicAccess::store(&_has_work, true); Service_lock->notify_all(); } } @@ -510,7 +510,7 @@ oop StringTable::intern(const char* utf8_string, TRAPS) { } oop StringTable::intern(const StringWrapper& name, TRAPS) { - assert(!Atomic::load_acquire(&_disable_interning_during_cds_dump), + assert(!AtomicAccess::load_acquire(&_disable_interning_during_cds_dump), "All threads that may intern strings should have been stopped before CDS starts copying the interned string table"); // shared table always uses java_lang_String::hash_code @@ -666,7 +666,7 @@ void StringTable::do_concurrent_work(JavaThread* jt) { // Rehash if needed. Rehashing goes to a safepoint but the rest of this // work is concurrent. if (needs_rehashing() && maybe_rehash_table()) { - Atomic::release_store(&_has_work, false); + AtomicAccess::release_store(&_has_work, false); return; // done, else grow } log_debug(stringtable, perf)("Concurrent work, live factor: %g", get_load_factor()); @@ -676,7 +676,7 @@ void StringTable::do_concurrent_work(JavaThread* jt) { } else { clean_dead_entries(jt); } - Atomic::release_store(&_has_work, false); + AtomicAccess::release_store(&_has_work, false); } // Called at VM_Operation safepoint @@ -966,7 +966,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) { // This flag will be cleared after intern table dumping has completed, so we can run the // compiler again (for future AOT method compilation, etc). 
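// A minimal sketch of the debug-only gate described above (hypothetical names): the
// flag is flipped with release stores around the critical phase and checked with an
// acquire load inside an assert, so a stray call during the phase fails loudly in
// debug builds while release builds compile the whole gate away.
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

DEBUG_ONLY(static volatile bool _phase_active = false;)

void begin_phase() { DEBUG_ONLY(AtomicAccess::release_store(&_phase_active, true);) }
void end_phase()   { DEBUG_ONLY(AtomicAccess::release_store(&_phase_active, false);) }

void checked_operation() {
  assert(!AtomicAccess::load_acquire(&_phase_active),
         "must not be called while the phase is active");
  // ... the operation itself ...
}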
- DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, true)); + DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, true)); if (items_count_acquire() > (size_t)max_jint) { fatal("Too many strings to be archived: %zu", items_count_acquire()); @@ -1105,7 +1105,7 @@ void StringTable::write_shared_table() { _local_table->do_safepoint_scan(copy_into_shared_table); writer.dump(&_shared_table, "string"); - DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, false)); + DEBUG_ONLY(AtomicAccess::release_store(&_disable_interning_during_cds_dump, false)); } void StringTable::set_shared_strings_array_index(int root_index) { diff --git a/src/hotspot/share/classfile/symbolTable.cpp b/src/hotspot/share/classfile/symbolTable.cpp index d0bcca87c142a..13320a30872f2 100644 --- a/src/hotspot/share/classfile/symbolTable.cpp +++ b/src/hotspot/share/classfile/symbolTable.cpp @@ -34,7 +34,7 @@ #include "memory/metaspaceClosure.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/timerTrace.hpp" #include "runtime/trimNativeHeap.hpp" @@ -216,17 +216,17 @@ void SymbolTable::create_table () { } } -void SymbolTable::reset_has_items_to_clean() { Atomic::store(&_has_items_to_clean, false); } -void SymbolTable::mark_has_items_to_clean() { Atomic::store(&_has_items_to_clean, true); } -bool SymbolTable::has_items_to_clean() { return Atomic::load(&_has_items_to_clean); } +void SymbolTable::reset_has_items_to_clean() { AtomicAccess::store(&_has_items_to_clean, false); } +void SymbolTable::mark_has_items_to_clean() { AtomicAccess::store(&_has_items_to_clean, true); } +bool SymbolTable::has_items_to_clean() { return AtomicAccess::load(&_has_items_to_clean); } void SymbolTable::item_added() { - Atomic::inc(&_items_count); + AtomicAccess::inc(&_items_count); } void SymbolTable::item_removed() { - Atomic::inc(&(_symbols_removed)); - Atomic::dec(&_items_count); + AtomicAccess::inc(&(_symbols_removed)); + AtomicAccess::dec(&_items_count); } double SymbolTable::get_load_factor() { @@ -237,7 +237,7 @@ size_t SymbolTable::table_size() { return ((size_t)1) << _local_table->get_size_log2(Thread::current()); } -bool SymbolTable::has_work() { return Atomic::load_acquire(&_has_work); } +bool SymbolTable::has_work() { return AtomicAccess::load_acquire(&_has_work); } void SymbolTable::trigger_cleanup() { // Avoid churn on ServiceThread @@ -786,7 +786,7 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) { bdt.done(jt); } - Atomic::add(&_symbols_counted, stdc._processed); + AtomicAccess::add(&_symbols_counted, stdc._processed); log_debug(symboltable)("Cleaned %zu of %zu", stdd._deleted, stdc._processed); @@ -814,7 +814,7 @@ void SymbolTable::do_concurrent_work(JavaThread* jt) { // Rehash if needed. Rehashing goes to a safepoint but the rest of this // work is concurrent. 
if (needs_rehashing() && maybe_rehash_table()) { - Atomic::release_store(&_has_work, false); + AtomicAccess::release_store(&_has_work, false); return; // done, else grow } log_debug(symboltable, perf)("Concurrent work, live factor: %g", get_load_factor()); @@ -824,7 +824,7 @@ void SymbolTable::do_concurrent_work(JavaThread* jt) { } else { clean_dead_entries(jt); } - Atomic::release_store(&_has_work, false); + AtomicAccess::release_store(&_has_work, false); } // Called at VM_Operation safepoint diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp index f4f7f694d6deb..22d4fd1892ea3 100644 --- a/src/hotspot/share/classfile/systemDictionary.cpp +++ b/src/hotspot/share/classfile/systemDictionary.cpp @@ -66,7 +66,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" @@ -1079,7 +1079,7 @@ InstanceKlass* SystemDictionary::load_shared_class(InstanceKlass* ik, assert(ik != nullptr, "sanity"); assert(ik->in_aot_cache(), "sanity"); assert(!ik->is_unshareable_info_restored(), "shared class can be restored only once"); - assert(Atomic::add(&ik->_shared_class_load_count, 1) == 1, "shared class loaded more than once"); + assert(AtomicAccess::add(&ik->_shared_class_load_count, 1) == 1, "shared class loaded more than once"); Symbol* class_name = ik->name(); if (!is_shared_class_visible(class_name, ik, pkg_entry, class_loader)) { diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index 8f2932f6abc25..beca3bcbcf5d8 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -51,7 +51,7 @@ #include "oops/oop.inline.hpp" #include "oops/verifyOopClosure.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" @@ -580,7 +580,7 @@ void CodeCache::free(CodeBlob* cb) { if (cb->is_nmethod()) { heap->set_nmethod_count(heap->nmethod_count() - 1); if (((nmethod *)cb)->has_dependencies()) { - Atomic::dec(&_number_of_nmethods_with_dependencies); + AtomicAccess::dec(&_number_of_nmethods_with_dependencies); } } if (cb->is_adapter_blob()) { @@ -616,7 +616,7 @@ void CodeCache::commit(CodeBlob* cb) { if (cb->is_nmethod()) { heap->set_nmethod_count(heap->nmethod_count() + 1); if (((nmethod *)cb)->has_dependencies()) { - Atomic::inc(&_number_of_nmethods_with_dependencies); + AtomicAccess::inc(&_number_of_nmethods_with_dependencies); } } if (cb->is_adapter_blob()) { @@ -786,7 +786,7 @@ void CodeCache::gc_on_allocation() { double free_ratio = double(free) / double(max); if (free_ratio <= StartAggressiveSweepingAt / 100.0) { // In case the GC is concurrent, we make sure only one thread requests the GC. - if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { + if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0); Universe::heap()->collect(GCCause::_codecache_GC_aggressive); } @@ -812,7 +812,7 @@ void CodeCache::gc_on_allocation() { // it is eventually invoked to avoid trouble. 
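Both GC-triggering call sites in codeCache.cpp guard Universe::heap()->collect() with a first-caller-wins claim: a compare-and-exchange of the request flag from false to true, where only the thread that still observes false issues the request. Restated on its own against the AtomicAccess name (the flag here is illustrative):

  #include "runtime/atomicAccess.hpp"

  static volatile bool _gc_requested = false;   // stand-in for _unloading_threshold_gc_requested

  bool try_claim_gc_request() {
    // Exactly one thread sees the previous value false; every later caller
    // observes true and skips the duplicate request.
    return AtomicAccess::cmpxchg(&_gc_requested, false, true) == false;
  }

The threshold-triggered hunk that follows performs the identical test, differing only in the GC cause it passes.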
if (allocated_since_last_ratio > threshold) { // In case the GC is concurrent, we make sure only one thread requests the GC. - if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { + if (AtomicAccess::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)", threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0); Universe::heap()->collect(GCCause::_codecache_GC_threshold); @@ -899,9 +899,9 @@ void CodeCache::release_exception_cache(ExceptionCache* entry) { delete entry; } else { for (;;) { - ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list); + ExceptionCache* purge_list_head = AtomicAccess::load(&_exception_cache_purge_list); entry->set_purge_list_next(purge_list_head); - if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) { + if (AtomicAccess::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) { break; } } @@ -1152,7 +1152,7 @@ void codeCache_init() { //------------------------------------------------------------------------------------------------ bool CodeCache::has_nmethods_with_dependencies() { - return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0; + return AtomicAccess::load_acquire(&_number_of_nmethods_with_dependencies) != 0; } void CodeCache::clear_inline_caches() { diff --git a/src/hotspot/share/code/compiledIC.cpp b/src/hotspot/share/code/compiledIC.cpp index 2547b8711db16..03d9adc8e242d 100644 --- a/src/hotspot/share/code/compiledIC.cpp +++ b/src/hotspot/share/code/compiledIC.cpp @@ -32,7 +32,7 @@ #include "oops/compressedKlass.hpp" #include "oops/klass.inline.hpp" #include "oops/method.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuationEntry.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -104,8 +104,8 @@ void CompiledICData::clean_metadata() { // subsequent miss handlers will upgrade the callsite to megamorphic, // which makes sense as it obviously is megamorphic then. if (!speculated_klass()->is_loader_alive()) { - Atomic::store(&_speculated_klass, (uintptr_t)0); - Atomic::store(&_speculated_method, (Method*)nullptr); + AtomicAccess::store(&_speculated_klass, (uintptr_t)0); + AtomicAccess::store(&_speculated_method, (Method*)nullptr); } assert(_speculated_method == nullptr || _speculated_method->method_holder()->is_loader_alive(), diff --git a/src/hotspot/share/code/dependencyContext.cpp b/src/hotspot/share/code/dependencyContext.cpp index 6c56ae20ddff3..adcdc00f9a48f 100644 --- a/src/hotspot/share/code/dependencyContext.cpp +++ b/src/hotspot/share/code/dependencyContext.cpp @@ -28,7 +28,7 @@ #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" @@ -107,7 +107,7 @@ void DependencyContext::add_dependent_nmethod(nmethod* nm) { // to skip list scans. The individual method checks are cheap, but walking the large // list of dependencies gets expensive. 
- nmethodBucket* head = Atomic::load(_dependency_context_addr); + nmethodBucket* head = AtomicAccess::load(_dependency_context_addr); if (head != nullptr && nm == head->get_nmethod()) { return; } @@ -121,10 +121,10 @@ void DependencyContext::add_dependent_nmethod(nmethod* nm) { nmethodBucket* new_head = new nmethodBucket(nm, nullptr); for (;;) { new_head->set_next(head); - if (Atomic::cmpxchg(_dependency_context_addr, head, new_head) == head) { + if (AtomicAccess::cmpxchg(_dependency_context_addr, head, new_head) == head) { break; } - head = Atomic::load(_dependency_context_addr); + head = AtomicAccess::load(_dependency_context_addr); } if (UsePerfData) { _perf_total_buckets_allocated_count->inc(); @@ -142,9 +142,9 @@ void DependencyContext::release(nmethodBucket* b) { // Mark the context as having stale entries, since it is not safe to // expunge the list right now. for (;;) { - nmethodBucket* purge_list_head = Atomic::load(&_purge_list); + nmethodBucket* purge_list_head = AtomicAccess::load(&_purge_list); b->set_purge_list_next(purge_list_head); - if (Atomic::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) { + if (AtomicAccess::cmpxchg(&_purge_list, purge_list_head, b) == purge_list_head) { break; } } @@ -196,7 +196,7 @@ void DependencyContext::remove_all_dependents() { // purge list when calling this. assert(!delete_on_release(), "should not delete on release"); - nmethodBucket* first = Atomic::load_acquire(_dependency_context_addr); + nmethodBucket* first = AtomicAccess::load_acquire(_dependency_context_addr); if (first == nullptr) { return; } @@ -211,10 +211,10 @@ void DependencyContext::remove_all_dependents() { } // Add the whole list to the purge list at once. - nmethodBucket* old_purge_list_head = Atomic::load(&_purge_list); + nmethodBucket* old_purge_list_head = AtomicAccess::load(&_purge_list); for (;;) { last->set_purge_list_next(old_purge_list_head); - nmethodBucket* next_purge_list_head = Atomic::cmpxchg(&_purge_list, old_purge_list_head, first); + nmethodBucket* next_purge_list_head = AtomicAccess::cmpxchg(&_purge_list, old_purge_list_head, first); if (old_purge_list_head == next_purge_list_head) { break; } @@ -264,16 +264,16 @@ bool DependencyContext::is_dependent_nmethod(nmethod* nm) { // dependency context was cleaned. GC threads claim cleanup tasks by performing // a CAS on this value. bool DependencyContext::claim_cleanup() { - uint64_t cleaning_epoch = Atomic::load(&_cleaning_epoch); - uint64_t last_cleanup = Atomic::load(_last_cleanup_addr); + uint64_t cleaning_epoch = AtomicAccess::load(&_cleaning_epoch); + uint64_t last_cleanup = AtomicAccess::load(_last_cleanup_addr); if (last_cleanup >= cleaning_epoch) { return false; } - return Atomic::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup; + return AtomicAccess::cmpxchg(_last_cleanup_addr, last_cleanup, cleaning_epoch) == last_cleanup; } bool DependencyContext::delete_on_release() { - return Atomic::load(&_cleaning_epoch) == 0; + return AtomicAccess::load(&_cleaning_epoch) == 0; } // Retrieve the first nmethodBucket that has a dependent that does not correspond to @@ -282,17 +282,17 @@ bool DependencyContext::delete_on_release() { nmethodBucket* DependencyContext::dependencies_not_unloading() { for (;;) { // Need acquire because the read value could come from a concurrent insert. 
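The DependencyContext changes above all rename the same building block: a lock-free push onto the head of a singly linked list, used both for the live dependency list and for the purge list, with a cmpxchg retry whenever another thread got its push in first. In isolation, with simplified types and names, the loop looks like this:

  #include "runtime/atomicAccess.hpp"

  struct Bucket {
    Bucket* volatile _next;
  };

  static Bucket* volatile _list_head = nullptr;   // stand-in for the dependency or purge list head

  void push_head(Bucket* b) {
    Bucket* head = AtomicAccess::load(&_list_head);
    for (;;) {
      b->_next = head;
      Bucket* witnessed = AtomicAccess::cmpxchg(&_list_head, head, b);
      if (witnessed == head) {
        return;               // b is now the list head
      }
      head = witnessed;       // lost the race; retry against the head that won
    }
  }

The dependencies_not_unloading() loads that follow are the consuming side, and the acquire noted in the comment is what makes a bucket published by such a concurrent push safe to dereference.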
- nmethodBucket* head = Atomic::load_acquire(_dependency_context_addr); + nmethodBucket* head = AtomicAccess::load_acquire(_dependency_context_addr); if (head == nullptr || !head->get_nmethod()->is_unloading()) { return head; } nmethodBucket* head_next = head->next(); OrderAccess::loadload(); - if (Atomic::load(_dependency_context_addr) != head) { + if (AtomicAccess::load(_dependency_context_addr) != head) { // Unstable load of head w.r.t. head->next continue; } - if (Atomic::cmpxchg(_dependency_context_addr, head, head_next) == head) { + if (AtomicAccess::cmpxchg(_dependency_context_addr, head, head_next) == head) { // Release is_unloading entries if unlinking was claimed DependencyContext::release(head); } @@ -301,11 +301,11 @@ nmethodBucket* DependencyContext::dependencies_not_unloading() { // Relaxed accessors void DependencyContext::set_dependencies(nmethodBucket* b) { - Atomic::store(_dependency_context_addr, b); + AtomicAccess::store(_dependency_context_addr, b); } nmethodBucket* DependencyContext::dependencies() { - return Atomic::load(_dependency_context_addr); + return AtomicAccess::load(_dependency_context_addr); } // After the gc_prologue, the dependency contexts may be claimed by the GC @@ -314,7 +314,7 @@ nmethodBucket* DependencyContext::dependencies() { void DependencyContext::cleaning_start() { assert(SafepointSynchronize::is_at_safepoint(), "must be"); uint64_t epoch = ++_cleaning_epoch_monotonic; - Atomic::store(&_cleaning_epoch, epoch); + AtomicAccess::store(&_cleaning_epoch, epoch); } // The epilogue marks the end of dependency context cleanup by the GC, @@ -324,7 +324,7 @@ void DependencyContext::cleaning_start() { // was called. That allows dependency contexts to be cleaned concurrently. void DependencyContext::cleaning_end() { uint64_t epoch = 0; - Atomic::store(&_cleaning_epoch, epoch); + AtomicAccess::store(&_cleaning_epoch, epoch); } // This function skips over nmethodBuckets in the list corresponding to @@ -336,17 +336,17 @@ nmethodBucket* nmethodBucket::next_not_unloading() { for (;;) { // Do not need acquire because the loaded entry can never be // concurrently inserted. - nmethodBucket* next = Atomic::load(&_next); + nmethodBucket* next = AtomicAccess::load(&_next); if (next == nullptr || !next->get_nmethod()->is_unloading()) { return next; } - nmethodBucket* next_next = Atomic::load(&next->_next); + nmethodBucket* next_next = AtomicAccess::load(&next->_next); OrderAccess::loadload(); - if (Atomic::load(&_next) != next) { + if (AtomicAccess::load(&_next) != next) { // Unstable load of next w.r.t. 
next->next continue; } - if (Atomic::cmpxchg(&_next, next, next_next) == next) { + if (AtomicAccess::cmpxchg(&_next, next, next_next) == next) { // Release is_unloading entries if unlinking was claimed DependencyContext::release(next); } @@ -355,17 +355,17 @@ nmethodBucket* nmethodBucket::next_not_unloading() { // Relaxed accessors nmethodBucket* nmethodBucket::next() { - return Atomic::load(&_next); + return AtomicAccess::load(&_next); } void nmethodBucket::set_next(nmethodBucket* b) { - Atomic::store(&_next, b); + AtomicAccess::store(&_next, b); } nmethodBucket* nmethodBucket::purge_list_next() { - return Atomic::load(&_purge_list_next); + return AtomicAccess::load(&_purge_list_next); } void nmethodBucket::set_purge_list_next(nmethodBucket* b) { - Atomic::store(&_purge_list_next, b); + AtomicAccess::store(&_purge_list_next, b); } diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index b8b3a70bf589d..fbd9f5030c87c 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -59,7 +59,7 @@ #include "prims/jvmtiImpl.hpp" #include "prims/jvmtiThreadState.hpp" #include "prims/methodHandles.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/deoptimization.hpp" #include "runtime/flags/flagSetting.hpp" @@ -376,11 +376,11 @@ bool ExceptionCache::add_address_and_handler(address addr, address handler) { } ExceptionCache* ExceptionCache::next() { - return Atomic::load(&_next); + return AtomicAccess::load(&_next); } void ExceptionCache::set_next(ExceptionCache *ec) { - Atomic::store(&_next, ec); + AtomicAccess::store(&_next, ec); } //----------------------------------------------------------------------------- @@ -492,12 +492,12 @@ const char* nmethod::state() const { void nmethod::set_deoptimized_done() { ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); if (_deoptimization_status != deoptimize_done) { // can't go backwards - Atomic::store(&_deoptimization_status, deoptimize_done); + AtomicAccess::store(&_deoptimization_status, deoptimize_done); } } ExceptionCache* nmethod::exception_cache_acquire() const { - return Atomic::load_acquire(&_exception_cache); + return AtomicAccess::load_acquire(&_exception_cache); } void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { @@ -517,7 +517,7 @@ void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { // next pointers always point at live ExceptionCaches, that are not removed due // to concurrent ExceptionCache cleanup. ExceptionCache* next = ec->next(); - if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) { + if (AtomicAccess::cmpxchg(&_exception_cache, ec, next) == ec) { CodeCache::release_exception_cache(ec); } continue; @@ -527,7 +527,7 @@ void nmethod::add_exception_cache_entry(ExceptionCache* new_entry) { new_entry->set_next(ec); } } - if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) { + if (AtomicAccess::cmpxchg(&_exception_cache, ec, new_entry) == ec) { return; } } @@ -560,7 +560,7 @@ void nmethod::clean_exception_cache() { // Try to clean head; this is contended by concurrent inserts, that // both lazily clean the head, and insert entries at the head. If // the CAS fails, the operation is restarted. 
- if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) { + if (AtomicAccess::cmpxchg(&_exception_cache, curr, next) != curr) { prev = nullptr; curr = exception_cache_acquire(); continue; @@ -919,7 +919,7 @@ void nmethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all if (md != nullptr && md->is_method()) { Method* method = static_cast(md); if (!method->method_holder()->is_loader_alive()) { - Atomic::store(r->metadata_addr(), (Method*)nullptr); + AtomicAccess::store(r->metadata_addr(), (Method*)nullptr); if (!r->metadata_is_immediate()) { r->fix_metadata_relocation(); @@ -1923,13 +1923,13 @@ void nmethod::verify_clean_inline_caches() { } void nmethod::mark_as_maybe_on_stack() { - Atomic::store(&_gc_epoch, CodeCache::gc_epoch()); + AtomicAccess::store(&_gc_epoch, CodeCache::gc_epoch()); } bool nmethod::is_maybe_on_stack() { // If the condition below is true, it means that the nmethod was found to // be alive the previous completed marking cycle. - return Atomic::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle(); + return AtomicAccess::load(&_gc_epoch) >= CodeCache::previous_completed_gc_marking_cycle(); } void nmethod::inc_decompile_count() { @@ -1956,7 +1956,7 @@ bool nmethod::try_transition(signed char new_state_int) { // Ensure monotonicity of transitions. return false; } - Atomic::store(&_state, new_state); + AtomicAccess::store(&_state, new_state); return true; } @@ -2007,7 +2007,7 @@ bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) { return false; } - if (Atomic::load(&_state) == not_entrant) { + if (AtomicAccess::load(&_state) == not_entrant) { // Avoid taking the lock if already in required state. // This is safe from races because the state is an end-state, // which the nmethod cannot back out of once entered. @@ -2019,7 +2019,7 @@ bool nmethod::make_not_entrant(InvalidationReason invalidation_reason) { // Enter critical section. Does not block for safepoint. ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag); - if (Atomic::load(&_state) == not_entrant) { + if (AtomicAccess::load(&_state) == not_entrant) { // another thread already performed this transition so nothing // to do, but return false to indicate this. return false; @@ -2390,7 +2390,7 @@ class IsUnloadingState: public AllStatic { }; bool nmethod::is_unloading() { - uint8_t state = Atomic::load(&_is_unloading_state); + uint8_t state = AtomicAccess::load(&_is_unloading_state); bool state_is_unloading = IsUnloadingState::is_unloading(state); if (state_is_unloading) { return true; @@ -2413,7 +2413,7 @@ bool nmethod::is_unloading() { // different outcomes, so we guard the computed result with a CAS // to ensure all threads have a shared view of whether an nmethod // is_unloading or not. 
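As the comment above spells out, concurrent callers of is_unloading() may race to different answers, so the computed state is published with a CAS and every thread adopts whichever answer landed first; the rename keeps the original memory_order_relaxed argument. A compressed sketch of that publish-or-adopt step (field and helper names are illustrative):

  #include "runtime/atomicAccess.hpp"

  static volatile uint8_t _cached_state = 0;   // stand-in for _is_unloading_state

  uint8_t publish_or_adopt(uint8_t observed, uint8_t computed) {
    uint8_t witnessed = AtomicAccess::cmpxchg(&_cached_state, observed, computed, memory_order_relaxed);
    // A losing CAS returns the state some other thread already published,
    // and the caller uses that shared value instead of its own result.
    return (witnessed == observed) ? computed : witnessed;
  }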
- uint8_t found_state = Atomic::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed); + uint8_t found_state = AtomicAccess::cmpxchg(&_is_unloading_state, state, new_state, memory_order_relaxed); if (found_state == state) { // First to change state, we win @@ -2426,7 +2426,7 @@ bool nmethod::is_unloading() { void nmethod::clear_unloading_state() { uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle()); - Atomic::store(&_is_unloading_state, state); + AtomicAccess::store(&_is_unloading_state, state); } @@ -2511,7 +2511,7 @@ bool nmethod::oops_do_try_claim_weak_request() { assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); if ((_oops_do_mark_link == nullptr) && - (Atomic::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) { + (AtomicAccess::replace_if_null(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag)))) { oops_do_log_change("oops_do, mark weak request"); return true; } @@ -2525,7 +2525,7 @@ void nmethod::oops_do_set_strong_done(nmethod* old_head) { nmethod::oops_do_mark_link* nmethod::oops_do_try_claim_strong_done() { assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); - oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag)); + oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(nullptr, claim_weak_request_tag), mark_link(this, claim_strong_done_tag)); if (old_next == nullptr) { oops_do_log_change("oops_do, mark strong done"); } @@ -2536,7 +2536,7 @@ nmethod::oops_do_mark_link* nmethod::oops_do_try_add_strong_request(nmethod::oop assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); assert(next == mark_link(this, claim_weak_request_tag), "Should be claimed as weak"); - oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag)); + oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(this, claim_strong_request_tag)); if (old_next == next) { oops_do_log_change("oops_do, mark strong request"); } @@ -2547,7 +2547,7 @@ bool nmethod::oops_do_try_claim_weak_done_as_strong_done(nmethod::oops_do_mark_l assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); assert(extract_state(next) == claim_weak_done_tag, "Should be claimed as weak done"); - oops_do_mark_link* old_next = Atomic::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag)); + oops_do_mark_link* old_next = AtomicAccess::cmpxchg(&_oops_do_mark_link, next, mark_link(extract_nmethod(next), claim_strong_done_tag)); if (old_next == next) { oops_do_log_change("oops_do, mark weak done -> mark strong done"); return true; @@ -2562,13 +2562,13 @@ nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() { extract_state(_oops_do_mark_link) == claim_strong_request_tag, "must be but is nmethod " PTR_FORMAT " %u", p2i(extract_nmethod(_oops_do_mark_link)), extract_state(_oops_do_mark_link)); - nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this); + nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this); // Self-loop if needed. if (old_head == nullptr) { old_head = this; } // Try to install end of list and weak done tag. 
- if (Atomic::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) { + if (AtomicAccess::cmpxchg(&_oops_do_mark_link, mark_link(this, claim_weak_request_tag), mark_link(old_head, claim_weak_done_tag)) == mark_link(this, claim_weak_request_tag)) { oops_do_log_change("oops_do, mark weak done"); return nullptr; } else { @@ -2579,7 +2579,7 @@ nmethod* nmethod::oops_do_try_add_to_list_as_weak_done() { void nmethod::oops_do_add_to_list_as_strong_done() { assert(SafepointSynchronize::is_at_safepoint(), "only at safepoint"); - nmethod* old_head = Atomic::xchg(&_oops_do_mark_nmethods, this); + nmethod* old_head = AtomicAccess::xchg(&_oops_do_mark_nmethods, this); // Self-loop if needed. if (old_head == nullptr) { old_head = this; diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index 301454b659159..3abda6010b7d1 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -286,7 +286,7 @@ class nmethod : public CodeBlob { volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization DeoptimizationStatus deoptimization_status() const { - return Atomic::load(&_deoptimization_status); + return AtomicAccess::load(&_deoptimization_status); } // Initialize fields to their default values diff --git a/src/hotspot/share/code/nmethod.inline.hpp b/src/hotspot/share/code/nmethod.inline.hpp index 1e556b6825099..62c8eb723eaa9 100644 --- a/src/hotspot/share/code/nmethod.inline.hpp +++ b/src/hotspot/share/code/nmethod.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "code/nmethod.hpp" #include "code/nativeInst.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.hpp" inline bool nmethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); } @@ -43,7 +43,7 @@ inline bool nmethod::is_deopt_mh_entry(address pc) { // class ExceptionCache methods -inline int ExceptionCache::count() { return Atomic::load_acquire(&_count); } +inline int ExceptionCache::count() { return AtomicAccess::load_acquire(&_count); } address ExceptionCache::pc_at(int index) { assert(index >= 0 && index < count(),""); @@ -56,7 +56,7 @@ address ExceptionCache::handler_at(int index) { } // increment_count is only called under lock, but there may be concurrent readers. 
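The ExceptionCache counter renamed in the next hunk is the single-writer flavor of these patterns: increment_count() runs only under a lock, so a plain read of _count plus one store is enough, but the store is a release so that lock-free readers going through count() (switched to AtomicAccess::load_acquire earlier in this file) never observe the new count before the entry it covers. Sketched with an illustrative field:

  #include "runtime/atomicAccess.hpp"

  static volatile int _count = 0;

  void increment_count_locked() {
    // Single writer, caller holds the lock: no CAS needed, but the release
    // store publishes the entry written just before the count was bumped.
    AtomicAccess::release_store(&_count, _count + 1);
  }

  int count() {
    return AtomicAccess::load_acquire(&_count);   // lock-free readers
  }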
-inline void ExceptionCache::increment_count() { Atomic::release_store(&_count, _count + 1); } +inline void ExceptionCache::increment_count() { AtomicAccess::release_store(&_count, _count + 1); } #endif // SHARE_CODE_NMETHOD_INLINE_HPP diff --git a/src/hotspot/share/code/vtableStubs.cpp b/src/hotspot/share/code/vtableStubs.cpp index 00826f820366d..b926888595d7a 100644 --- a/src/hotspot/share/code/vtableStubs.cpp +++ b/src/hotspot/share/code/vtableStubs.cpp @@ -127,7 +127,7 @@ void VtableStubs::initialize() { { MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag); for (int i = 0; i < N; i++) { - Atomic::store(&_table[i], (VtableStub*)nullptr); + AtomicAccess::store(&_table[i], (VtableStub*)nullptr); } } } @@ -268,7 +268,7 @@ inline uint VtableStubs::unsafe_hash(address entry_point) { VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index) { assert_lock_strong(VtableStubs_lock); unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index); - VtableStub* s = Atomic::load(&_table[hash]); + VtableStub* s = AtomicAccess::load(&_table[hash]); while( s && !s->matches(is_vtable_stub, vtable_index)) s = s->next(); return s; } @@ -279,9 +279,9 @@ void VtableStubs::enter(bool is_vtable_stub, int vtable_index, VtableStub* s) { assert(s->matches(is_vtable_stub, vtable_index), "bad vtable stub"); unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index); // Insert s at the beginning of the corresponding list. - s->set_next(Atomic::load(&_table[h])); + s->set_next(AtomicAccess::load(&_table[h])); // Make sure that concurrent readers not taking the mutex observe the writing of "next". - Atomic::release_store(&_table[h], s); + AtomicAccess::release_store(&_table[h], s); } VtableStub* VtableStubs::entry_point(address pc) { @@ -292,7 +292,7 @@ VtableStub* VtableStubs::entry_point(address pc) { MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag); uint hash = VtableStubs::unsafe_hash(pc); VtableStub* s; - for (s = Atomic::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {} + for (s = AtomicAccess::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {} return (s != nullptr && s->entry_point() == pc) ? 
s : nullptr; } @@ -305,7 +305,7 @@ bool VtableStubs::contains(address pc) { VtableStub* VtableStubs::stub_containing(address pc) { for (int i = 0; i < N; i++) { - for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) { + for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) { if (s->contains(pc)) return s; } } @@ -318,7 +318,7 @@ void vtableStubs_init() { void VtableStubs::vtable_stub_do(void f(VtableStub*)) { for (int i = 0; i < N; i++) { - for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) { + for (VtableStub* s = AtomicAccess::load_acquire(&_table[i]); s != nullptr; s = s->next()) { f(s); } } diff --git a/src/hotspot/share/compiler/compilationMemoryStatistic.cpp b/src/hotspot/share/compiler/compilationMemoryStatistic.cpp index d22eab8ac5530..d1e2f6f34a0b7 100644 --- a/src/hotspot/share/compiler/compilationMemoryStatistic.cpp +++ b/src/hotspot/share/compiler/compilationMemoryStatistic.cpp @@ -37,7 +37,7 @@ #include "nmt/nmtCommon.hpp" #include "oops/method.inline.hpp" #include "oops/symbol.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "utilities/checkedCast.hpp" @@ -902,7 +902,7 @@ void CompilationMemoryStatistic::on_arena_chunk_allocation(size_t size, int aren // Store this ArenaStat. If other threads also run into OOMs, let them sleep. // We will never return, so the global store will not contain this info. We will // print the stored ArenaStat in hs-err (see print_error_report) - if (Atomic::cmpxchg(&_arenastat_oom_crash, (ArenaStatCounter*) nullptr, arena_stat) != nullptr) { + if (AtomicAccess::cmpxchg(&_arenastat_oom_crash, (ArenaStatCounter*) nullptr, arena_stat) != nullptr) { os::infinite_sleep(); } } @@ -992,7 +992,7 @@ static bool check_before_reporting(outputStream* st) { } bool CompilationMemoryStatistic::in_oom_crash() { - return Atomic::load(&_arenastat_oom_crash) != nullptr; + return AtomicAccess::load(&_arenastat_oom_crash) != nullptr; } void CompilationMemoryStatistic::print_error_report(outputStream* st) { @@ -1000,7 +1000,7 @@ void CompilationMemoryStatistic::print_error_report(outputStream* st) { return; } StreamIndentor si(tty, 4); - const ArenaStatCounter* const oom_stats = Atomic::load(&_arenastat_oom_crash); + const ArenaStatCounter* const oom_stats = AtomicAccess::load(&_arenastat_oom_crash); if (oom_stats != nullptr) { // we crashed due to a compiler limit hit. Lead with a printout of the offending stats // in detail. diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 5f3bdc5ee3c41..d1e3154bbd9ac 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -53,7 +53,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/nativeLookup.hpp" #include "prims/whitebox.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/escapeBarrier.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" @@ -1587,14 +1587,14 @@ int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) { assert(!is_osr, "can't be osr"); // Adapters, native wrappers and method handle intrinsics // should be generated always. - return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1); + return AtomicAccess::add(CICountNative ? 
&_native_compilation_id : &_compilation_id, 1); } else if (CICountOSR && is_osr) { - id = Atomic::add(&_osr_compilation_id, 1); + id = AtomicAccess::add(&_osr_compilation_id, 1); if (CIStartOSR <= id && id < CIStopOSR) { return id; } } else { - id = Atomic::add(&_compilation_id, 1); + id = AtomicAccess::add(&_compilation_id, 1); if (CIStart <= id && id < CIStop) { return id; } @@ -1606,7 +1606,7 @@ int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) { #else // CICountOSR is a develop flag and set to 'false' by default. In a product built, // only _compilation_id is incremented. - return Atomic::add(&_compilation_id, 1); + return AtomicAccess::add(&_compilation_id, 1); #endif } diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp index 1c936511dca87..cb657b7b601bf 100644 --- a/src/hotspot/share/compiler/compileBroker.hpp +++ b/src/hotspot/share/compiler/compileBroker.hpp @@ -30,7 +30,7 @@ #include "compiler/compilerDirectives.hpp" #include "compiler/compilerThread.hpp" #include "compiler/compileTask.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/perfDataTypes.hpp" #include "utilities/stack.hpp" #if INCLUDE_JVMCI @@ -362,7 +362,7 @@ class CompileBroker: AllStatic { static inline bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); } static bool set_should_compile_new_jobs(jint new_state) { // Return success if the current caller set it - jint old = Atomic::cmpxchg(&_should_compile_new_jobs, 1-new_state, new_state); + jint old = AtomicAccess::cmpxchg(&_should_compile_new_jobs, 1-new_state, new_state); bool success = (old == (1-new_state)); if (success) { if (new_state == run_compilation) { @@ -377,11 +377,11 @@ class CompileBroker: AllStatic { static void disable_compilation_forever() { UseCompiler = false; AlwaysCompileLoopMethods = false; - Atomic::xchg(&_should_compile_new_jobs, jint(shutdown_compilation)); + AtomicAccess::xchg(&_should_compile_new_jobs, jint(shutdown_compilation)); } static bool is_compilation_disabled_forever() { - return Atomic::load(&_should_compile_new_jobs) == shutdown_compilation; + return AtomicAccess::load(&_should_compile_new_jobs) == shutdown_compilation; } static void wait_for_no_active_tasks(); @@ -389,7 +389,7 @@ class CompileBroker: AllStatic { static void handle_full_code_cache(CodeBlobType code_blob_type); // Ensures that warning is only printed once. 
static bool should_print_compiler_warning() { - jint old = Atomic::cmpxchg(&_print_compilation_warning, 0, 1); + jint old = AtomicAccess::cmpxchg(&_print_compilation_warning, 0, 1); return old == 0; } // Return total compilation ticks diff --git a/src/hotspot/share/compiler/compileLog.cpp b/src/hotspot/share/compiler/compileLog.cpp index 9641e99c33a21..d0ea80d60194e 100644 --- a/src/hotspot/share/compiler/compileLog.cpp +++ b/src/hotspot/share/compiler/compileLog.cpp @@ -28,7 +28,7 @@ #include "jvm.h" #include "memory/allocation.inline.hpp" #include "oops/method.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" @@ -51,9 +51,9 @@ CompileLog::CompileLog(const char* file_name, FILE* fp, intx thread_id) // link into the global list while (true) { - CompileLog* head = Atomic::load_acquire(&_list_head); + CompileLog* head = AtomicAccess::load_acquire(&_list_head); _next = head; - if (Atomic::cmpxchg(&_list_head, head, this) == head) { + if (AtomicAccess::cmpxchg(&_list_head, head, this) == head) { break; } } @@ -206,7 +206,7 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen) if (called_exit) return; called_exit = true; - CompileLog* log = Atomic::load_acquire(&_list_head); + CompileLog* log = AtomicAccess::load_acquire(&_list_head); while (log != nullptr) { log->flush(); const char* partial_file = log->file(); @@ -294,7 +294,7 @@ void CompileLog::finish_log_on_error(outputStream* file, char* buf, int buflen) delete log; // Removes partial file log = next_log; } - Atomic::store(&_list_head, (CompileLog*)nullptr); + AtomicAccess::store(&_list_head, (CompileLog*)nullptr); } // ------------------------------------------------------------------ diff --git a/src/hotspot/share/compiler/compileTask.cpp b/src/hotspot/share/compiler/compileTask.cpp index e3b640372a36a..536d81045d7d1 100644 --- a/src/hotspot/share/compiler/compileTask.cpp +++ b/src/hotspot/share/compiler/compileTask.cpp @@ -76,7 +76,7 @@ CompileTask::CompileTask(int compile_id, _next = nullptr; _prev = nullptr; - Atomic::add(&_active_tasks, 1, memory_order_relaxed); + AtomicAccess::add(&_active_tasks, 1, memory_order_relaxed); } CompileTask::~CompileTask() { @@ -91,7 +91,7 @@ CompileTask::~CompileTask() { _failure_reason_on_C_heap = false; } - if (Atomic::sub(&_active_tasks, 1, memory_order_relaxed) == 0) { + if (AtomicAccess::sub(&_active_tasks, 1, memory_order_relaxed) == 0) { MonitorLocker wait_ml(CompileTaskWait_lock); wait_ml.notify_all(); } @@ -99,7 +99,7 @@ CompileTask::~CompileTask() { void CompileTask::wait_for_no_active_tasks() { MonitorLocker locker(CompileTaskWait_lock); - while (Atomic::load(&_active_tasks) > 0) { + while (AtomicAccess::load(&_active_tasks) > 0) { locker.wait(); } } diff --git a/src/hotspot/share/compiler/oopMap.cpp b/src/hotspot/share/compiler/oopMap.cpp index aeb6ac3828efb..aa0108729755f 100644 --- a/src/hotspot/share/compiler/oopMap.cpp +++ b/src/hotspot/share/compiler/oopMap.cpp @@ -34,7 +34,7 @@ #include "memory/iterator.hpp" #include "memory/resourceArea.hpp" #include "oops/compressedOops.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/signature.hpp" diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp index c324aa9baffa2..16cae714cb976 100644 --- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp +++ 
b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp @@ -34,7 +34,7 @@ #include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "utilities/ostream.hpp" @@ -154,7 +154,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) { // Allocation successful, update counters if (verbose) { size_t last = _last_counter_update; - if ((used - last >= _step_counter_update) && Atomic::cmpxchg(&_last_counter_update, last, used) == last) { + if ((used - last >= _step_counter_update) && AtomicAccess::cmpxchg(&_last_counter_update, last, used) == last) { _monitoring_support->update_counters(); } } @@ -162,7 +162,7 @@ HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) { // ...and print the occupancy line, if needed if (verbose) { size_t last = _last_heap_print; - if ((used - last >= _step_heap_print) && Atomic::cmpxchg(&_last_heap_print, last, used) == last) { + if ((used - last >= _step_heap_print) && AtomicAccess::cmpxchg(&_last_heap_print, last, used) == last) { print_heap_info(used); print_metaspace_info(); } diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.cpp b/src/hotspot/share/gc/g1/g1BatchedTask.cpp index 9089ffedf61de..5755830154149 100644 --- a/src/hotspot/share/gc/g1/g1BatchedTask.cpp +++ b/src/hotspot/share/gc/g1/g1BatchedTask.cpp @@ -26,7 +26,7 @@ #include "gc/g1/g1BatchedTask.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1GCParPhaseTimesTracker.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/growableArray.hpp" void G1AbstractSubTask::record_work_item(uint worker_id, uint index, size_t count) { @@ -40,7 +40,7 @@ const char* G1AbstractSubTask::name() const { } bool G1BatchedTask::try_claim_serial_task(int& task) { - task = Atomic::fetch_then_add(&_num_serial_tasks_done, 1); + task = AtomicAccess::fetch_then_add(&_num_serial_tasks_done, 1); return task < _serial_tasks.length(); } @@ -96,8 +96,8 @@ void G1BatchedTask::work(uint worker_id) { } G1BatchedTask::~G1BatchedTask() { - assert(Atomic::load(&_num_serial_tasks_done) >= _serial_tasks.length(), - "Only %d tasks of %d claimed", Atomic::load(&_num_serial_tasks_done), _serial_tasks.length()); + assert(AtomicAccess::load(&_num_serial_tasks_done) >= _serial_tasks.length(), + "Only %d tasks of %d claimed", AtomicAccess::load(&_num_serial_tasks_done), _serial_tasks.length()); for (G1AbstractSubTask* task : _parallel_tasks) { delete task; diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp index dcd1979343acb..4653f96980de5 100644 --- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp +++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp @@ -49,7 +49,7 @@ G1BlockOffsetTable::G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* st void G1BlockOffsetTable::set_offset_array(uint8_t* addr, uint8_t offset) { check_address(addr, "Block offset table address out of range"); - Atomic::store(addr, offset); + AtomicAccess::store(addr, offset); } void G1BlockOffsetTable::set_offset_array(uint8_t* addr, HeapWord* high, HeapWord* low) { diff --git a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp index e35ae08dfada2..900e9516c1a0e 100644 --- a/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp +++ b/src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle 
and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "gc/shared/cardTable.hpp" #include "gc/shared/memset_with_concurrent_readers.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline HeapWord* G1BlockOffsetTable::block_start_reaching_into_card(const void* addr) const { assert(_reserved.contains(addr), "invalid address"); @@ -52,7 +52,7 @@ inline HeapWord* G1BlockOffsetTable::block_start_reaching_into_card(const void* uint8_t G1BlockOffsetTable::offset_array(uint8_t* addr) const { check_address(addr, "Block offset table address out of range"); - return Atomic::load(addr); + return AtomicAccess::load(addr); } inline uint8_t* G1BlockOffsetTable::entry_for_addr(const void* const p) const { diff --git a/src/hotspot/share/gc/g1/g1CardSet.cpp b/src/hotspot/share/gc/g1/g1CardSet.cpp index f5b9bf2aebeb4..80cf5fea76abd 100644 --- a/src/hotspot/share/gc/g1/g1CardSet.cpp +++ b/src/hotspot/share/gc/g1/g1CardSet.cpp @@ -29,7 +29,7 @@ #include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/java.hpp" #include "utilities/bitMap.inline.hpp" @@ -215,9 +215,9 @@ void G1CardSetCoarsenStats::subtract_from(G1CardSetCoarsenStats& other) { void G1CardSetCoarsenStats::record_coarsening(uint tag, bool collision) { assert(tag < ARRAY_SIZE(_coarsen_from), "tag %u out of bounds", tag); - Atomic::inc(&_coarsen_from[tag], memory_order_relaxed); + AtomicAccess::inc(&_coarsen_from[tag], memory_order_relaxed); if (collision) { - Atomic::inc(&_coarsen_collision[tag], memory_order_relaxed); + AtomicAccess::inc(&_coarsen_collision[tag], memory_order_relaxed); } } @@ -314,7 +314,7 @@ class G1CardSetHashTable : public CHeapObj { if (!_inserted_card && inserted) { // It does not matter to us who is setting the flag so a regular atomic store // is sufficient. - Atomic::store(&_inserted_card, true); + AtomicAccess::store(&_inserted_card, true); } return found.value(); @@ -343,9 +343,9 @@ class G1CardSetHashTable : public CHeapObj { } void reset() { - if (Atomic::load(&_inserted_card)) { + if (AtomicAccess::load(&_inserted_card)) { _table.unsafe_reset(InitialLogTableSize); - Atomic::store(&_inserted_card, false); + AtomicAccess::store(&_inserted_card, false); } } @@ -462,7 +462,7 @@ G1CardSet::ContainerPtr G1CardSet::acquire_container(ContainerPtr volatile* cont GlobalCounter::CriticalSection cs(Thread::current()); while (true) { // Get ContainerPtr and increment refcount atomically wrt to memory reuse. 
- ContainerPtr container = Atomic::load_acquire(container_addr); + ContainerPtr container = AtomicAccess::load_acquire(container_addr); uint cs_type = container_type(container); if (container == FullCardSet || cs_type == ContainerInlinePtr) { return container; @@ -505,13 +505,13 @@ class G1ReleaseCardsets : public StackObj { void coarsen_to_full(ContainerPtr* container_addr) { while (true) { - ContainerPtr cur_container = Atomic::load_acquire(container_addr); + ContainerPtr cur_container = AtomicAccess::load_acquire(container_addr); uint cs_type = G1CardSet::container_type(cur_container); if (cur_container == G1CardSet::FullCardSet) { return; } - ContainerPtr old_value = Atomic::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet); + ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, G1CardSet::FullCardSet); if (old_value == cur_container) { _card_set->release_and_maybe_free_container(cur_container); @@ -547,7 +547,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container, ContainerPtr volatile* bucket_entry = howl->container_addr(bucket); while (true) { - if (Atomic::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) { + if (AtomicAccess::load(&howl->_num_entries) >= _config->cards_in_howl_threshold()) { return Overflow; } @@ -571,7 +571,7 @@ G1AddCardResult G1CardSet::add_to_howl(ContainerPtr parent_container, } if (increment_total && add_result == Added) { - Atomic::inc(&howl->_num_entries, memory_order_relaxed); + AtomicAccess::inc(&howl->_num_entries, memory_order_relaxed); } if (to_transfer != nullptr) { @@ -640,7 +640,7 @@ bool G1CardSet::coarsen_container(ContainerPtr volatile* container_addr, ShouldNotReachHere(); } - ContainerPtr old_value = Atomic::cmpxchg(container_addr, cur_container, new_container); // Memory order? + ContainerPtr old_value = AtomicAccess::cmpxchg(container_addr, cur_container, new_container); // Memory order? if (old_value == cur_container) { // Success. Indicate that the cards from the current card set must be transferred // by this caller. @@ -687,7 +687,7 @@ void G1CardSet::transfer_cards(G1CardSetHashTableValue* table_entry, ContainerPt assert(container_type(source_container) == ContainerHowl, "must be"); // Need to correct for that the Full remembered set occupies more cards than the // AoCS before. 
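The occupancy bookkeeping in these G1CardSet hunks (for example _num_entries and the _num_occupied adjustment that follows) is treated as statistics: the call sites pass memory_order_relaxed, and the rename leaves that argument untouched. A minimal sketch of such a relaxed counter, with an illustrative name:

  #include "runtime/atomicAccess.hpp"

  static volatile size_t _occupied = 0;   // stand-in for counters such as _num_occupied

  void record_occupied(size_t diff) {
    // Relaxed ordering, matching the call sites above: the counter is
    // not used to publish other data, only to keep a running total.
    AtomicAccess::add(&_occupied, diff, memory_order_relaxed);
  }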
- Atomic::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed); + AtomicAccess::add(&_num_occupied, _config->max_cards_in_region() - table_entry->_num_occupied, memory_order_relaxed); } } @@ -713,14 +713,14 @@ void G1CardSet::transfer_cards_in_howl(ContainerPtr parent_container, diff -= 1; G1CardSetHowl* howling_array = container_ptr(parent_container); - Atomic::add(&howling_array->_num_entries, diff, memory_order_relaxed); + AtomicAccess::add(&howling_array->_num_entries, diff, memory_order_relaxed); G1CardSetHashTableValue* table_entry = get_container(card_region); assert(table_entry != nullptr, "Table entry not found for transferred cards"); - Atomic::add(&table_entry->_num_occupied, diff, memory_order_relaxed); + AtomicAccess::add(&table_entry->_num_occupied, diff, memory_order_relaxed); - Atomic::add(&_num_occupied, diff, memory_order_relaxed); + AtomicAccess::add(&_num_occupied, diff, memory_order_relaxed); } } @@ -827,8 +827,8 @@ G1AddCardResult G1CardSet::add_card(uint card_region, uint card_in_region, bool } if (increment_total && add_result == Added) { - Atomic::inc(&table_entry->_num_occupied, memory_order_relaxed); - Atomic::inc(&_num_occupied, memory_order_relaxed); + AtomicAccess::inc(&table_entry->_num_occupied, memory_order_relaxed); + AtomicAccess::inc(&_num_occupied, memory_order_relaxed); } if (should_grow_table) { _table->grow(); diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp index 84e6e3e9274a3..72c7795be2e7c 100644 --- a/src/hotspot/share/gc/g1/g1CardSetContainers.hpp +++ b/src/hotspot/share/gc/g1/g1CardSetContainers.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "gc/g1/g1CardSet.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/bitMap.hpp" #include "utilities/globalDefinitions.hpp" @@ -151,7 +151,7 @@ class G1CardSetContainer { public: G1CardSetContainer() : _ref_count(3) { } - uintptr_t refcount() const { return Atomic::load_acquire(&_ref_count); } + uintptr_t refcount() const { return AtomicAccess::load_acquire(&_ref_count); } bool try_increment_refcount(); @@ -192,7 +192,7 @@ class G1CardSetArray : public G1CardSetContainer { } ~G1CardSetArrayLocker() { - Atomic::release_store(_num_entries_addr, _local_num_entries); + AtomicAccess::release_store(_num_entries_addr, _local_num_entries); } }; diff --git a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp index 0efc44dea12fe..1958309f517b9 100644 --- a/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CardSetContainers.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -67,7 +67,7 @@ inline G1AddCardResult G1CardSetInlinePtr::add(uint card_idx, uint bits_per_card return Overflow; } ContainerPtr new_value = merge(_value, card_idx, num_cards, bits_per_card); - ContainerPtr old_value = Atomic::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed); + ContainerPtr old_value = AtomicAccess::cmpxchg(_value_addr, _value, new_value, memory_order_relaxed); if (_value == old_value) { return Added; } @@ -126,7 +126,7 @@ inline bool G1CardSetContainer::try_increment_refcount() { } uintptr_t new_value = old_value + 2; - uintptr_t ref_count = Atomic::cmpxchg(&_ref_count, old_value, new_value); + uintptr_t ref_count = AtomicAccess::cmpxchg(&_ref_count, old_value, new_value); if (ref_count == old_value) { return true; } @@ -137,7 +137,7 @@ inline bool G1CardSetContainer::try_increment_refcount() { inline uintptr_t G1CardSetContainer::decrement_refcount() { uintptr_t old_value = refcount(); assert((old_value & 0x1) != 0 && old_value >= 3, "precondition"); - return Atomic::sub(&_ref_count, 2u); + return AtomicAccess::sub(&_ref_count, 2u); } inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_cards) : @@ -152,11 +152,11 @@ inline G1CardSetArray::G1CardSetArray(uint card_in_region, EntryCountType num_ca inline G1CardSetArray::G1CardSetArrayLocker::G1CardSetArrayLocker(EntryCountType volatile* num_entries_addr) : _num_entries_addr(num_entries_addr) { SpinYield s; - EntryCountType num_entries = Atomic::load(_num_entries_addr) & EntryMask; + EntryCountType num_entries = AtomicAccess::load(_num_entries_addr) & EntryMask; while (true) { - EntryCountType old_value = Atomic::cmpxchg(_num_entries_addr, - num_entries, - (EntryCountType)(num_entries | LockBitMask)); + EntryCountType old_value = AtomicAccess::cmpxchg(_num_entries_addr, + num_entries, + (EntryCountType)(num_entries | LockBitMask)); if (old_value == num_entries) { // Succeeded locking the array. _local_num_entries = num_entries; @@ -189,7 +189,7 @@ inline G1CardSetArray::EntryDataType G1CardSetArray::at(EntryCountType index) co inline G1AddCardResult G1CardSetArray::add(uint card_idx) { assert(card_idx < (1u << (sizeof(EntryDataType) * BitsPerByte)), "Card index %u does not fit allowed card value range.", card_idx); - EntryCountType num_entries = Atomic::load_acquire(&_num_entries) & EntryMask; + EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask; EntryCountType idx = 0; for (; idx < num_entries; idx++) { if (at(idx) == card_idx) { @@ -223,7 +223,7 @@ inline G1AddCardResult G1CardSetArray::add(uint card_idx) { } inline bool G1CardSetArray::contains(uint card_idx) { - EntryCountType num_entries = Atomic::load_acquire(&_num_entries) & EntryMask; + EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask; for (EntryCountType idx = 0; idx < num_entries; idx++) { if (at(idx) == card_idx) { @@ -235,7 +235,7 @@ inline bool G1CardSetArray::contains(uint card_idx) { template void G1CardSetArray::iterate(CardVisitor& found) { - EntryCountType num_entries = Atomic::load_acquire(&_num_entries) & EntryMask; + EntryCountType num_entries = AtomicAccess::load_acquire(&_num_entries) & EntryMask; for (EntryCountType idx = 0; idx < num_entries; idx++) { found(at(idx)); } @@ -260,7 +260,7 @@ inline G1AddCardResult G1CardSetBitMap::add(uint card_idx, size_t threshold, siz return bm.at(card_idx) ? 
Found : Overflow; } if (bm.par_set_bit(card_idx)) { - Atomic::inc(&_num_bits_set, memory_order_relaxed); + AtomicAccess::inc(&_num_bits_set, memory_order_relaxed); return Added; } return Found; @@ -311,7 +311,7 @@ inline G1CardSetHowl::G1CardSetHowl(EntryCountType card_in_region, G1CardSetConf inline bool G1CardSetHowl::contains(uint card_idx, G1CardSetConfiguration* config) { EntryCountType bucket = config->howl_bucket_index(card_idx); ContainerPtr* array_entry = container_addr(bucket); - ContainerPtr container = Atomic::load_acquire(array_entry); + ContainerPtr container = AtomicAccess::load_acquire(array_entry); switch (G1CardSet::container_type(container)) { case G1CardSet::ContainerArrayOfCards: { diff --git a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp index 6f683e777fb94..d13a6fe2dcaf8 100644 --- a/src/hotspot/share/gc/g1/g1CardSetMemory.cpp +++ b/src/hotspot/share/gc/g1/g1CardSetMemory.cpp @@ -26,7 +26,7 @@ #include "gc/g1/g1CardSetContainers.inline.hpp" #include "gc/g1/g1CardSetMemory.inline.hpp" #include "gc/g1/g1MonotonicArena.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/ostream.hpp" G1CardSetAllocator::G1CardSetAllocator(const char* name, diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp index 40534b9b4c98a..60ad3a2af3256 100644 --- a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp +++ b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp @@ -28,7 +28,7 @@ #include "gc/g1/g1HeapRegion.hpp" #include "memory/allocation.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/concurrentHashTable.inline.hpp" #include "utilities/concurrentHashTableTasks.inline.hpp" @@ -120,7 +120,7 @@ class G1CodeRootSetHashTable : public CHeapObj { bool grow_hint = false; bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint); if (inserted) { - Atomic::inc(&_num_entries); + AtomicAccess::inc(&_num_entries); } if (grow_hint) { _table.grow(Thread::current()); @@ -131,7 +131,7 @@ class G1CodeRootSetHashTable : public CHeapObj { HashTableLookUp lookup(method); bool removed = _table.remove(Thread::current(), lookup); if (removed) { - Atomic::dec(&_num_entries); + AtomicAccess::dec(&_num_entries); } return removed; } @@ -182,7 +182,7 @@ class G1CodeRootSetHashTable : public CHeapObj { guarantee(succeeded, "unable to clean table"); if (num_deleted != 0) { - size_t current_size = Atomic::sub(&_num_entries, num_deleted); + size_t current_size = AtomicAccess::sub(&_num_entries, num_deleted); shrink_to_match(current_size); } } @@ -226,7 +226,7 @@ class G1CodeRootSetHashTable : public CHeapObj { size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); } - size_t number_of_entries() const { return Atomic::load(&_num_entries); } + size_t number_of_entries() const { return AtomicAccess::load(&_num_entries); } }; uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index de3fc0f5da599..4f7eaa36c2d03 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -105,7 +105,7 @@ #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/cpuTimeCounters.hpp" #include 
"runtime/handles.inline.hpp" #include "runtime/init.hpp" diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp index 553be04d28527..3370ff9938f2a 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp @@ -41,7 +41,7 @@ #include "gc/shared/markBitMap.inline.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "oops/stackChunkOop.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/threadSMR.inline.hpp" #include "utilities/bitMap.inline.hpp" @@ -53,10 +53,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) { inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) { count = 0; - if (Atomic::load(&_cur_claim) >= _list.length()) { + if (AtomicAccess::load(&_cur_claim) >= _list.length()) { return nullptr; } - uint claim = Atomic::fetch_then_add(&_cur_claim, _claim_step); + uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step); if (claim >= _list.length()) { return nullptr; } diff --git a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp index aa4bd3f1e5bef..954ca40a77fa7 100644 --- a/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp +++ b/src/hotspot/share/gc/g1/g1CollectionSetChooser.cpp @@ -27,7 +27,7 @@ #include "gc/g1/g1CollectionSetChooser.hpp" #include "gc/g1/g1HeapRegionRemSet.inline.hpp" #include "gc/shared/space.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/quickSort.hpp" // Determine collection set candidates (from marking): For all regions determine @@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { // Claim a new chunk, returning its bounds [from, to[. 
void claim_chunk(uint& from, uint& to) { - uint result = Atomic::add(&_cur_claim_idx, _chunk_size); + uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size); assert(_max_size > result - 1, "Array too small, is %u should be %u with chunk size %u.", _max_size, result, _chunk_size); @@ -208,7 +208,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { void update_totals(uint num_regions) { if (num_regions > 0) { - Atomic::add(&_num_regions_added, num_regions); + AtomicAccess::add(&_num_regions_added, num_regions); } } @@ -220,7 +220,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { void prune(G1HeapRegion** data) { G1Policy* p = G1CollectedHeap::heap()->policy(); - uint num_candidates = Atomic::load(&_num_regions_added); + uint num_candidates = AtomicAccess::load(&_num_regions_added); uint min_old_cset_length = p->calc_min_old_cset_length(num_candidates); uint num_pruned = 0; @@ -253,7 +253,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask { wasted_bytes, allowed_waste); - Atomic::sub(&_num_regions_added, num_pruned, memory_order_relaxed); + AtomicAccess::sub(&_num_regions_added, num_pruned, memory_order_relaxed); } public: diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 5e42bf7188216..6d30a93dafb88 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -66,7 +66,7 @@ #include "nmt/memTracker.hpp" #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" @@ -151,21 +151,21 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_ return nullptr; } - size_t cur_idx = Atomic::fetch_then_add(&_size, 1u); + size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u); if (cur_idx >= _max_capacity) { return nullptr; } size_t bucket = get_bucket(cur_idx); - if (Atomic::load_acquire(&_buckets[bucket]) == nullptr) { + if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) { if (!_should_grow) { // Prefer to restart the CM. return nullptr; } MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag); - if (Atomic::load_acquire(&_buckets[bucket]) == nullptr) { + if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) { size_t desired_capacity = bucket_size(bucket) * 2; if (!try_expand_to(desired_capacity)) { return nullptr; @@ -258,7 +258,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) { // and the new capacity (new_capacity). This step ensures that there are no gaps in the // array and that the capacity accurately reflects the reserved memory. for (; i <= highest_bucket; i++) { - if (Atomic::load_acquire(&_buckets[i]) != nullptr) { + if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) { continue; // Skip over already allocated buckets. 
} @@ -278,7 +278,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) { return false; } _capacity += bucket_capacity; - Atomic::release_store(&_buckets[i], bucket_base); + AtomicAccess::release_store(&_buckets[i], bucket_base); } return true; } @@ -383,7 +383,7 @@ void G1CMRootMemRegions::reset() { void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) { assert_at_safepoint(); - size_t idx = Atomic::fetch_then_add(&_num_root_regions, 1u); + size_t idx = AtomicAccess::fetch_then_add(&_num_root_regions, 1u); assert(idx < _max_regions, "Trying to add more root MemRegions than there is space %zu", _max_regions); assert(start != nullptr && end != nullptr && start <= end, "Start (" PTR_FORMAT ") should be less or equal to " "end (" PTR_FORMAT ")", p2i(start), p2i(end)); @@ -411,7 +411,7 @@ const MemRegion* G1CMRootMemRegions::claim_next() { return nullptr; } - size_t claimed_index = Atomic::fetch_then_add(&_claimed_root_regions, 1u); + size_t claimed_index = AtomicAccess::fetch_then_add(&_claimed_root_regions, 1u); if (claimed_index < _num_root_regions) { return &_root_regions[claimed_index]; } @@ -1109,7 +1109,7 @@ void G1ConcurrentMark::concurrent_cycle_start() { } uint G1ConcurrentMark::completed_mark_cycles() const { - return Atomic::load(&_completed_mark_cycles); + return AtomicAccess::load(&_completed_mark_cycles); } void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) { @@ -1118,7 +1118,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) { _g1h->trace_heap_after_gc(_gc_tracer_cm); if (mark_cycle_completed) { - Atomic::inc(&_completed_mark_cycles, memory_order_relaxed); + AtomicAccess::inc(&_completed_mark_cycles, memory_order_relaxed); } if (has_aborted()) { @@ -1320,7 +1320,7 @@ class G1UpdateRegionLivenessAndSelectForRebuildTask : public WorkerTask { G1OnRegionClosure on_region_cl(_g1h, _cm, &local_cleanup_list); _g1h->heap_region_par_iterate_from_worker_offset(&on_region_cl, &_hrclaimer, worker_id); - Atomic::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild); + AtomicAccess::add(&_total_selected_for_rebuild, on_region_cl._num_selected_for_rebuild); // Update the old/humongous region sets _g1h->remove_from_old_gen_sets(on_region_cl._num_old_regions_removed, @@ -1903,7 +1903,7 @@ G1HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) { HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + G1HeapRegion::GrainWords; // Is the gap between reading the finger and doing the CAS too long? - HeapWord* res = Atomic::cmpxchg(&_finger, finger, end); + HeapWord* res = AtomicAccess::cmpxchg(&_finger, finger, end); if (res == finger && curr_region != nullptr) { // we succeeded HeapWord* bottom = curr_region->bottom(); diff --git a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp index dc88f09f9809b..84776b7a4b172 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp @@ -338,7 +338,7 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) { assert_current_thread_is_primary_refinement_thread(); size_t num_cards = _dcqs.num_cards(); size_t mutator_threshold = SIZE_MAX; - uint old_wanted = Atomic::load(&_threads_wanted); + uint old_wanted = AtomicAccess::load(&_threads_wanted); _threads_needed.update(old_wanted, available_bytes, @@ -360,7 +360,7 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) { // worse. 
mutator_threshold = _pending_cards_target; } - Atomic::store(&_threads_wanted, new_wanted); + AtomicAccess::store(&_threads_wanted, new_wanted); _dcqs.set_mutator_refinement_threshold(mutator_threshold); log_debug(gc, refine)("Concurrent refinement: wanted %u, cards: %zu, " "predicted: %zu, time: %1.2fms", @@ -374,7 +374,7 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) { if (!_thread_control.activate(i)) { // Failed to allocate and activate thread. Stop trying to activate, and // instead use mutator threads to make up the gap. - Atomic::store(&_threads_wanted, i); + AtomicAccess::store(&_threads_wanted, i); _dcqs.set_mutator_refinement_threshold(_pending_cards_target); break; } @@ -384,9 +384,9 @@ void G1ConcurrentRefine::adjust_threads_wanted(size_t available_bytes) { void G1ConcurrentRefine::reduce_threads_wanted() { assert_current_thread_is_primary_refinement_thread(); if (!_needs_adjust) { // Defer if adjustment request is active. - uint wanted = Atomic::load(&_threads_wanted); + uint wanted = AtomicAccess::load(&_threads_wanted); if (wanted > 0) { - Atomic::store(&_threads_wanted, --wanted); + AtomicAccess::store(&_threads_wanted, --wanted); } // If very little time remains until GC, enable mutator refinement. If // the target has been reached, this keeps the number of pending cards on @@ -398,7 +398,7 @@ void G1ConcurrentRefine::reduce_threads_wanted() { } bool G1ConcurrentRefine::is_thread_wanted(uint worker_id) const { - return worker_id < Atomic::load(&_threads_wanted); + return worker_id < AtomicAccess::load(&_threads_wanted); } bool G1ConcurrentRefine::is_thread_adjustment_needed() const { diff --git a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp index a2b463ecb347f..ec9d68af3bb59 100644 --- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp +++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp @@ -37,7 +37,7 @@ #include "gc/shared/bufferNodeList.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "memory/iterator.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" @@ -117,14 +117,14 @@ void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) { } size_t G1DirtyCardQueueSet::num_cards() const { - return Atomic::load(&_num_cards); + return AtomicAccess::load(&_num_cards); } void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) { assert(cbn != nullptr, "precondition"); // Increment _num_cards before adding to queue, so queue removal doesn't // need to deal with _num_cards possibly going negative. - Atomic::add(&_num_cards, cbn->size()); + AtomicAccess::add(&_num_cards, cbn->size()); // Perform push in CS. The old tail may be popped while the push is // observing it (attaching it to the new buffer). We need to ensure it // can't be reused until the push completes, to avoid ABA problems. 
@@ -160,7 +160,7 @@ BufferNode* G1DirtyCardQueueSet::get_completed_buffer() { result = dequeue_completed_buffer(); if (result == nullptr) return nullptr; } - Atomic::sub(&_num_cards, result->size()); + AtomicAccess::sub(&_num_cards, result->size()); return result; } @@ -172,9 +172,9 @@ void G1DirtyCardQueueSet::verify_num_cards() const { cur = cur->next()) { actual += cur->size(); } - assert(actual == Atomic::load(&_num_cards), + assert(actual == AtomicAccess::load(&_num_cards), "Num entries in completed buffers should be %zu but are %zu", - Atomic::load(&_num_cards), actual); + AtomicAccess::load(&_num_cards), actual); } #endif // ASSERT @@ -185,7 +185,7 @@ G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() : #ifdef ASSERT G1DirtyCardQueueSet::PausedBuffers::PausedList::~PausedList() { - assert(Atomic::load(&_head) == nullptr, "precondition"); + assert(AtomicAccess::load(&_head) == nullptr, "precondition"); assert(_tail == nullptr, "precondition"); } #endif // ASSERT @@ -198,7 +198,7 @@ bool G1DirtyCardQueueSet::PausedBuffers::PausedList::is_next() const { void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) { assert_not_at_safepoint(); assert(is_next(), "precondition"); - BufferNode* old_head = Atomic::xchg(&_head, node); + BufferNode* old_head = AtomicAccess::xchg(&_head, node); if (old_head == nullptr) { assert(_tail == nullptr, "invariant"); _tail = node; @@ -208,9 +208,9 @@ void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) { } G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() { - BufferNode* head = Atomic::load(&_head); + BufferNode* head = AtomicAccess::load(&_head); BufferNode* tail = _tail; - Atomic::store(&_head, (BufferNode*)nullptr); + AtomicAccess::store(&_head, (BufferNode*)nullptr); _tail = nullptr; return HeadTail(head, tail); } @@ -219,17 +219,17 @@ G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(nullptr) {} #ifdef ASSERT G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() { - assert(Atomic::load(&_plist) == nullptr, "invariant"); + assert(AtomicAccess::load(&_plist) == nullptr, "invariant"); } #endif // ASSERT void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* node) { assert_not_at_safepoint(); - PausedList* plist = Atomic::load_acquire(&_plist); + PausedList* plist = AtomicAccess::load_acquire(&_plist); if (plist == nullptr) { // Try to install a new next list. plist = new PausedList(); - PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)nullptr, plist); + PausedList* old_plist = AtomicAccess::cmpxchg(&_plist, (PausedList*)nullptr, plist); if (old_plist != nullptr) { // Some other thread installed a new next list. Use it instead. delete plist; @@ -247,11 +247,11 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous( // Deal with plist in a critical section, to prevent it from being // deleted out from under us by a concurrent take_previous(). GlobalCounter::CriticalSection cs(Thread::current()); - previous = Atomic::load_acquire(&_plist); + previous = AtomicAccess::load_acquire(&_plist); if ((previous == nullptr) || // Nothing to take. previous->is_next() || // Not from a previous safepoint. // Some other thread stole it. 
- (Atomic::cmpxchg(&_plist, previous, (PausedList*)nullptr) != previous)) { + (AtomicAccess::cmpxchg(&_plist, previous, (PausedList*)nullptr) != previous)) { return HeadTail(); } } @@ -268,9 +268,9 @@ G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous( G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() { assert_at_safepoint(); HeadTail result; - PausedList* plist = Atomic::load(&_plist); + PausedList* plist = AtomicAccess::load(&_plist); if (plist != nullptr) { - Atomic::store(&_plist, (PausedList*)nullptr); + AtomicAccess::store(&_plist, (PausedList*)nullptr); result = plist->take(); delete plist; } @@ -286,7 +286,7 @@ void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) { // notification checking after the coming safepoint if it doesn't GC. // Note that this means the queue's _num_cards differs from the number // of cards in the queued buffers when there are paused buffers. - Atomic::add(&_num_cards, node->size()); + AtomicAccess::add(&_num_cards, node->size()); _paused.add(node); } @@ -325,7 +325,7 @@ void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) { assert(allocator() == src->allocator(), "precondition"); const BufferNodeList from = src->take_all_completed_buffers(); if (from._head != nullptr) { - Atomic::add(&_num_cards, from._entry_count); + AtomicAccess::add(&_num_cards, from._entry_count); _completed.append(*from._head, *from._tail); } } @@ -334,8 +334,8 @@ BufferNodeList G1DirtyCardQueueSet::take_all_completed_buffers() { enqueue_all_paused_buffers(); verify_num_cards(); Pair<BufferNode*, BufferNode*> pair = _completed.take_all(); - size_t num_cards = Atomic::load(&_num_cards); - Atomic::store(&_num_cards, size_t(0)); + size_t num_cards = AtomicAccess::load(&_num_cards); + AtomicAccess::store(&_num_cards, size_t(0)); return BufferNodeList(pair.first, pair.second, num_cards); } @@ -480,7 +480,7 @@ void G1DirtyCardQueueSet::handle_completed_buffer(BufferNode* new_node, enqueue_completed_buffer(new_node); // No need for mutator refinement if number of cards is below limit. - if (Atomic::load(&_num_cards) <= Atomic::load(&_mutator_refinement_threshold)) { + if (AtomicAccess::load(&_num_cards) <= AtomicAccess::load(&_mutator_refinement_threshold)) { return; } @@ -514,7 +514,7 @@ bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id, size_t stop_at, G1ConcurrentRefineStats* stats) { // Not enough cards to trigger processing. - if (Atomic::load(&_num_cards) <= stop_at) return false; + if (AtomicAccess::load(&_num_cards) <= stop_at) return false; BufferNode* node = get_completed_buffer(); if (node == nullptr) return false; // Didn't get a buffer to process.
@@ -591,9 +591,9 @@ void G1DirtyCardQueueSet::record_detached_refinement_stats(G1ConcurrentRefineSta } size_t G1DirtyCardQueueSet::mutator_refinement_threshold() const { - return Atomic::load(&_mutator_refinement_threshold); + return AtomicAccess::load(&_mutator_refinement_threshold); } void G1DirtyCardQueueSet::set_mutator_refinement_threshold(size_t value) { - Atomic::store(&_mutator_refinement_threshold, value); + AtomicAccess::store(&_mutator_refinement_threshold, value); } diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp index 86ceb40e97edb..ffcb5a0022f7c 100644 --- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp +++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.cpp @@ -28,7 +28,7 @@ #include "gc/g1/g1EvacFailureRegions.inline.hpp" #include "gc/g1/g1HeapRegion.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/bitMap.inline.hpp" G1EvacFailureRegions::G1EvacFailureRegions() : @@ -43,7 +43,7 @@ G1EvacFailureRegions::~G1EvacFailureRegions() { } void G1EvacFailureRegions::pre_collection(uint max_regions) { - Atomic::store(&_num_regions_evac_failed, 0u); + AtomicAccess::store(&_num_regions_evac_failed, 0u); _regions_evac_failed.resize(max_regions); _regions_pinned.resize(max_regions); _regions_alloc_failed.resize(max_regions); @@ -69,6 +69,6 @@ void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure, G1CollectedHeap::heap()->par_iterate_regions_array(closure, hrclaimer, _evac_failed_regions, - Atomic::load(&_num_regions_evac_failed), + AtomicAccess::load(&_num_regions_evac_failed), worker_id); } diff --git a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp index 6d5084a144720..6eec9b63e6b5d 100644 --- a/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp +++ b/src/hotspot/share/gc/g1/g1EvacFailureRegions.inline.hpp @@ -29,10 +29,10 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" uint G1EvacFailureRegions::num_regions_evac_failed() const { - return Atomic::load(&_num_regions_evac_failed); + return AtomicAccess::load(&_num_regions_evac_failed); } bool G1EvacFailureRegions::has_regions_evac_failed() const { @@ -57,7 +57,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi bool success = _regions_evac_failed.par_set_bit(region_idx, memory_order_relaxed); if (success) { - size_t offset = Atomic::fetch_then_add(&_num_regions_evac_failed, 1u); + size_t offset = AtomicAccess::fetch_then_add(&_num_regions_evac_failed, 1u); _evac_failed_regions[offset] = region_idx; G1CollectedHeap* g1h = G1CollectedHeap::heap(); diff --git a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp index 123ce92cb1d8e..c90598a30cb06 100644 --- a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp +++ b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,28 +27,28 @@ #include "gc/g1/g1EvacStats.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline void G1EvacStats::add_direct_allocated(size_t value) { - Atomic::add(&_direct_allocated, value, memory_order_relaxed); + AtomicAccess::add(&_direct_allocated, value, memory_order_relaxed); } inline void G1EvacStats::add_num_plab_filled(size_t value) { - Atomic::add(&_num_plab_filled, value, memory_order_relaxed); + AtomicAccess::add(&_num_plab_filled, value, memory_order_relaxed); } inline void G1EvacStats::add_num_direct_allocated(size_t value) { - Atomic::add(&_num_direct_allocated, value, memory_order_relaxed); + AtomicAccess::add(&_num_direct_allocated, value, memory_order_relaxed); } inline void G1EvacStats::add_region_end_waste(size_t value) { - Atomic::add(&_region_end_waste, value, memory_order_relaxed); - Atomic::inc(&_regions_filled, memory_order_relaxed); + AtomicAccess::add(&_region_end_waste, value, memory_order_relaxed); + AtomicAccess::inc(&_regions_filled, memory_order_relaxed); } inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) { - Atomic::add(&_failure_used, used, memory_order_relaxed); - Atomic::add(&_failure_waste, waste, memory_order_relaxed); + AtomicAccess::add(&_failure_used, used, memory_order_relaxed); + AtomicAccess::add(&_failure_waste, waste, memory_order_relaxed); } #endif // SHARE_GC_G1_G1EVACSTATS_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1FreeIdSet.cpp b/src/hotspot/share/gc/g1/g1FreeIdSet.cpp index 6ff863920df23..8a26a6140ce76 100644 --- a/src/hotspot/share/gc/g1/g1FreeIdSet.cpp +++ b/src/hotspot/share/gc/g1/g1FreeIdSet.cpp @@ -24,7 +24,7 @@ #include "gc/g1/g1FreeIdSet.hpp" #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/checkedCast.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -74,13 +74,13 @@ uint G1FreeIdSet::claim_par_id() { // Semaphore gate permits passage by no more than the number of // available ids, so there must be one that we can claim. But there // may be multiple threads trying to claim ids at the same time. - uintx old_head = Atomic::load(&_head); + uintx old_head = AtomicAccess::load(&_head); uint index; while (true) { index = head_index(old_head); assert(index < _size, "invariant"); uintx new_head = make_head(_next[index], old_head); - new_head = Atomic::cmpxchg(&_head, old_head, new_head); + new_head = AtomicAccess::cmpxchg(&_head, old_head, new_head); if (new_head == old_head) break; old_head = new_head; } @@ -92,11 +92,11 @@ void G1FreeIdSet::release_par_id(uint id) { uint index = id - _start; assert(index < _size, "invalid id %u", id); assert(_next[index] == Claimed, "precondition"); - uintx old_head = Atomic::load(&_head); + uintx old_head = AtomicAccess::load(&_head); while (true) { _next[index] = head_index(old_head); uintx new_head = make_head(index, old_head); - new_head = Atomic::cmpxchg(&_head, old_head, new_head); + new_head = AtomicAccess::cmpxchg(&_head, old_head, new_head); if (new_head == old_head) break; old_head = new_head; } diff --git a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp index 106f88e7d0df2..b52f3d796044b 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. 
All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc/g1/g1FullGCHeapRegionAttr.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" #include "oops/oopsHierarchy.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" bool G1FullCollector::is_compacting(oop obj) const { return _region_attr_table.is_compacting(cast_from_oop<HeapWord*>(obj)); @@ -63,11 +63,11 @@ void G1FullCollector::update_from_skip_compacting_to_compacting(uint region_idx) } void G1FullCollector::set_compaction_top(G1HeapRegion* r, HeapWord* value) { - Atomic::store(&_compaction_tops[r->hrm_index()], value); + AtomicAccess::store(&_compaction_tops[r->hrm_index()], value); } HeapWord* G1FullCollector::compaction_top(G1HeapRegion* r) const { - return Atomic::load(&_compaction_tops[r->hrm_index()]); + return AtomicAccess::load(&_compaction_tops[r->hrm_index()]); } void G1FullCollector::set_has_compaction_targets() { diff --git a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp index 9a1dd4d1ff54e..83c846e84d4d8 100644 --- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp +++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp @@ -37,7 +37,7 @@ #include "gc/shared/weakProcessor.inline.hpp" #include "logging/log.hpp" #include "memory/iterator.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" class G1AdjustLiveClosure : public StackObj { G1AdjustClosure* _adjust_closure; diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.cpp b/src/hotspot/share/gc/g1/g1HeapRegion.cpp index 86b224865175f..09bdfefccb7d9 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.cpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.cpp @@ -43,7 +43,7 @@ #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "utilities/powerOfTwo.hpp" @@ -288,7 +288,7 @@ void G1HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) { } void G1HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) { - Atomic::add(&_garbage_bytes, garbage_bytes, memory_order_relaxed); + AtomicAccess::add(&_garbage_bytes, garbage_bytes, memory_order_relaxed); } // Code roots support @@ -441,7 +441,7 @@ void G1HeapRegion::print_on(outputStream* st) const { st->print("|-"); } } - st->print("|%3zu", Atomic::load(&_pinned_object_count)); + st->print("|%3zu", AtomicAccess::load(&_pinned_object_count)); st->print_cr(""); } diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.hpp b/src/hotspot/share/gc/g1/g1HeapRegion.hpp index 7d49633d0bcbf..71584ffb24d26 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -394,7 +394,7 @@ class G1HeapRegion : public CHeapObj { bool is_old_or_humongous() const { return _type.is_old_or_humongous(); } - size_t pinned_count() const { return Atomic::load(&_pinned_object_count); } + size_t pinned_count() const { return AtomicAccess::load(&_pinned_object_count); } bool has_pinned_objects() const { return pinned_count() > 0; } void set_free(); diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp b/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp index 236e72aeb914d..f25bf62c9bed7 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegion.inline.hpp @@ -35,7 +35,7 @@ #include "gc/g1/g1Policy.hpp" #include "gc/g1/g1Predictions.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/init.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/safepoint.hpp" @@ -194,7 +194,7 @@ inline HeapWord* G1HeapRegion::par_allocate(size_t min_word_size, size_t want_to_allocate = MIN2(available, desired_word_size); if (want_to_allocate >= min_word_size) { HeapWord* new_top = obj + want_to_allocate; - HeapWord* result = Atomic::cmpxchg(&_top, obj, new_top); + HeapWord* result = AtomicAccess::cmpxchg(&_top, obj, new_top); // result can be one of two: // the old top value: the exchange succeeded // otherwise: the new value of the top is returned. @@ -258,11 +258,11 @@ inline HeapWord* G1HeapRegion::parsable_bottom() const { } inline HeapWord* G1HeapRegion::parsable_bottom_acquire() const { - return Atomic::load_acquire(&_parsable_bottom); + return AtomicAccess::load_acquire(&_parsable_bottom); } inline void G1HeapRegion::reset_parsable_bottom() { - Atomic::release_store(&_parsable_bottom, bottom()); + AtomicAccess::release_store(&_parsable_bottom, bottom()); } inline void G1HeapRegion::note_end_of_marking(HeapWord* top_at_mark_start, size_t marked_bytes, size_t incoming_refs) { @@ -511,7 +511,7 @@ inline void G1HeapRegion::record_surv_words_in_group(size_t words_survived) { inline void G1HeapRegion::add_pinned_object_count(size_t value) { assert(value != 0, "wasted effort"); assert(!is_free(), "trying to pin free region %u, adding %zu", hrm_index(), value); - Atomic::add(&_pinned_object_count, value, memory_order_relaxed); + AtomicAccess::add(&_pinned_object_count, value, memory_order_relaxed); } inline void G1HeapRegion::install_cset_group(G1CSetCandidateGroup* cset_group) { diff --git a/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp b/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp index fe5aa3d215089..d4286a1caeba9 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionManager.cpp @@ -34,7 +34,7 @@ #include "jfr/jfrEvents.hpp" #include "logging/logStream.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" #include "utilities/bitMap.inline.hpp" @@ -726,7 +726,7 @@ bool G1HeapRegionClaimer::is_region_claimed(uint region_index) const { bool G1HeapRegionClaimer::claim_region(uint region_index) { assert(region_index < _n_regions, "Invalid index."); - uint old_val = Atomic::cmpxchg(&_claims[region_index], Unclaimed, Claimed); + uint old_val = AtomicAccess::cmpxchg(&_claims[region_index], Unclaimed, Claimed); return old_val == Unclaimed; } diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp 
b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp index b05a60234e297..fae73a2c6bf6f 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.cpp @@ -31,7 +31,7 @@ #include "memory/allocation.hpp" #include "memory/padded.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp index 32008f7f20ded..023216f989d17 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.hpp @@ -30,7 +30,7 @@ #include "gc/g1/g1CodeRootSet.hpp" #include "gc/g1/g1CollectionSetCandidates.hpp" #include "gc/g1/g1FromCardCache.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/safepoint.hpp" #include "utilities/bitMap.hpp" diff --git a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp index ff92795928911..fbd529cb1d302 100644 --- a/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp +++ b/src/hotspot/share/gc/g1/g1HeapRegionRemSet.inline.hpp @@ -30,7 +30,7 @@ #include "gc/g1/g1CardSet.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1HeapRegion.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/bitMap.inline.hpp" void G1HeapRegionRemSet::set_state_untracked() { diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp index 4c7a411d87867..1ee299e2ee41a 100644 --- a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp +++ b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp @@ -24,7 +24,7 @@ #include "gc/g1/g1MonotonicArena.inline.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/vmOperations.hpp" #include "utilities/globalCounter.inline.hpp" @@ -61,13 +61,13 @@ void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first, size_t num, size_t mem_size) { _list.prepend(first, last); - Atomic::add(&_num_segments, num, memory_order_relaxed); - Atomic::add(&_mem_size, mem_size, memory_order_relaxed); + AtomicAccess::add(&_num_segments, num, memory_order_relaxed); + AtomicAccess::add(&_mem_size, mem_size, memory_order_relaxed); } void G1MonotonicArena::SegmentFreeList::print_on(outputStream* out, const char* prefix) { out->print_cr("%s: segments %zu size %zu", - prefix, Atomic::load(&_num_segments), Atomic::load(&_mem_size)); + prefix, AtomicAccess::load(&_num_segments), AtomicAccess::load(&_mem_size)); } G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& num_segments, @@ -75,12 +75,12 @@ G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& nu GlobalCounter::CriticalSection cs(Thread::current()); Segment* result = _list.pop_all(); - num_segments = Atomic::load(&_num_segments); - mem_size = Atomic::load(&_mem_size); + num_segments = AtomicAccess::load(&_num_segments); + mem_size = AtomicAccess::load(&_mem_size); if (result != nullptr) { - Atomic::sub(&_num_segments, num_segments, memory_order_relaxed); - Atomic::sub(&_mem_size, mem_size, memory_order_relaxed); + AtomicAccess::sub(&_num_segments, num_segments, memory_order_relaxed); + AtomicAccess::sub(&_mem_size, mem_size, memory_order_relaxed); 
} return result; } @@ -96,8 +96,8 @@ void G1MonotonicArena::SegmentFreeList::free_all() { Segment::delete_segment(cur); } - Atomic::sub(&_num_segments, num_freed, memory_order_relaxed); - Atomic::sub(&_mem_size, mem_size_freed, memory_order_relaxed); + AtomicAccess::sub(&_num_segments, num_freed, memory_order_relaxed); + AtomicAccess::sub(&_mem_size, mem_size_freed, memory_order_relaxed); } G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) { @@ -115,7 +115,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) { } // Install it as current allocation segment. - Segment* old = Atomic::cmpxchg(&_first, prev, next); + Segment* old = AtomicAccess::cmpxchg(&_first, prev, next); if (old != prev) { // Somebody else installed the segment, use that one. Segment::delete_segment(next); @@ -126,9 +126,9 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) { _last = next; } // Successfully installed the segment into the list. - Atomic::inc(&_num_segments, memory_order_relaxed); - Atomic::add(&_mem_size, next->mem_size(), memory_order_relaxed); - Atomic::add(&_num_total_slots, next->num_slots(), memory_order_relaxed); + AtomicAccess::inc(&_num_segments, memory_order_relaxed); + AtomicAccess::add(&_mem_size, next->mem_size(), memory_order_relaxed); + AtomicAccess::add(&_num_total_slots, next->num_slots(), memory_order_relaxed); return next; } } @@ -155,7 +155,7 @@ uint G1MonotonicArena::slot_size() const { } void G1MonotonicArena::drop_all() { - Segment* cur = Atomic::load_acquire(&_first); + Segment* cur = AtomicAccess::load_acquire(&_first); if (cur != nullptr) { assert(_last != nullptr, "If there is at least one segment, there must be a last one."); @@ -193,7 +193,7 @@ void G1MonotonicArena::drop_all() { void* G1MonotonicArena::allocate() { assert(slot_size() > 0, "instance size not set."); - Segment* cur = Atomic::load_acquire(&_first); + Segment* cur = AtomicAccess::load_acquire(&_first); if (cur == nullptr) { cur = new_segment(cur); } @@ -201,7 +201,7 @@ void* G1MonotonicArena::allocate() { while (true) { void* slot = cur->allocate_slot(); if (slot != nullptr) { - Atomic::inc(&_num_allocated_slots, memory_order_relaxed); + AtomicAccess::inc(&_num_allocated_slots, memory_order_relaxed); guarantee(is_aligned(slot, _alloc_options->slot_alignment()), "result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment()); return slot; @@ -213,7 +213,7 @@ void* G1MonotonicArena::allocate() { } uint G1MonotonicArena::num_segments() const { - return Atomic::load(&_num_segments); + return AtomicAccess::load(&_num_segments); } #ifdef ASSERT @@ -238,7 +238,7 @@ uint G1MonotonicArena::calculate_length() const { template void G1MonotonicArena::iterate_segments(SegmentClosure& closure) const { - Segment* cur = Atomic::load_acquire(&_first); + Segment* cur = AtomicAccess::load_acquire(&_first); assert((cur != nullptr) == (_last != nullptr), "If there is at least one segment, there must be a last one"); diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp index b51f3e37db180..0434a222b2160 100644 --- a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp +++ b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -81,11 +81,11 @@ class G1MonotonicArena : public FreeListConfig { DEBUG_ONLY(uint calculate_length() const;) public: - const Segment* first_segment() const { return Atomic::load(&_first); } + const Segment* first_segment() const { return AtomicAccess::load(&_first); } - uint num_total_slots() const { return Atomic::load(&_num_total_slots); } + uint num_total_slots() const { return AtomicAccess::load(&_num_total_slots); } uint num_allocated_slots() const { - uint allocated = Atomic::load(&_num_allocated_slots); + uint allocated = AtomicAccess::load(&_num_allocated_slots); assert(calculate_length() == allocated, "Must be"); return allocated; } @@ -214,8 +214,8 @@ class G1MonotonicArena::SegmentFreeList { void print_on(outputStream* out, const char* prefix = ""); - size_t num_segments() const { return Atomic::load(&_num_segments); } - size_t mem_size() const { return Atomic::load(&_mem_size); } + size_t num_segments() const { return AtomicAccess::load(&_num_segments); } + size_t mem_size() const { return AtomicAccess::load(&_mem_size); } }; // Configuration for G1MonotonicArena, e.g slot size, slot number of next Segment. diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp index 4ded4bcccae4d..d4c1aa8c4e3c1 100644 --- a/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp +++ b/src/hotspot/share/gc/g1/g1MonotonicArena.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -28,14 +28,14 @@ #include "gc/g1/g1MonotonicArena.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalCounter.inline.hpp" inline void* G1MonotonicArena::Segment::allocate_slot() { if (_next_allocate >= _num_slots) { return nullptr; } - uint result = Atomic::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed); + uint result = AtomicAccess::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed); if (result >= _num_slots) { return nullptr; } @@ -48,8 +48,8 @@ inline G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get() { Segment* result = _list.pop(); if (result != nullptr) { - Atomic::dec(&_num_segments, memory_order_relaxed); - Atomic::sub(&_mem_size, result->mem_size(), memory_order_relaxed); + AtomicAccess::dec(&_num_segments, memory_order_relaxed); + AtomicAccess::sub(&_mem_size, result->mem_size(), memory_order_relaxed); } return result; } diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp index eb76106d91c82..d7e0c6e394fcc 100644 --- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp +++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp @@ -28,7 +28,7 @@ #include "nmt/memTracker.hpp" #include "oops/markWord.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" #include "utilities/bitMap.inline.hpp" diff --git a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp index 388b378fdcdfa..42c3a872e6b0f 100644 --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp @@ -42,7 +42,7 @@ #include "memory/allocation.inline.hpp" #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/prefetch.inline.hpp" #include "utilities/globalDefinitions.hpp" diff --git a/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp b/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp index a4c79cbed7a9a..884a2b2ca55d9 100644 --- a/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp +++ b/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp @@ -24,7 +24,7 @@ #include "gc/g1/g1ParallelCleaning.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #if INCLUDE_JVMCI #include "jvmci/jvmci.hpp" #endif @@ -35,11 +35,11 @@ JVMCICleaningTask::JVMCICleaningTask() : } bool JVMCICleaningTask::claim_cleaning_task() { - if (Atomic::load(&_cleaning_claimed)) { + if (AtomicAccess::load(&_cleaning_claimed)) { return false; } - return !Atomic::cmpxchg(&_cleaning_claimed, false, true); + return !AtomicAccess::cmpxchg(&_cleaning_claimed, false, true); } void JVMCICleaningTask::work(bool unloading_occurred) { diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 6141f1056fec0..9f872aa6ccd67 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -204,8 +204,8 @@ void G1Policy::update_young_length_bounds(size_t pending_cards, size_t card_rs_l // allocation. // That is "fine" - at most this will schedule a GC (hopefully only a little) too // early or too late. 
- Atomic::store(&_young_list_desired_length, new_young_list_desired_length); - Atomic::store(&_young_list_target_length, new_young_list_target_length); + AtomicAccess::store(&_young_list_desired_length, new_young_list_desired_length); + AtomicAccess::store(&_young_list_target_length, new_young_list_target_length); } // Calculates desired young gen length. It is calculated from: diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp index d449439df8c03..e9f7529e509de 100644 --- a/src/hotspot/share/gc/g1/g1Policy.hpp +++ b/src/hotspot/share/gc/g1/g1Policy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ #include "gc/g1/g1RemSetTrackingPolicy.hpp" #include "gc/g1/g1YoungGenSizer.hpp" #include "gc/shared/gcCause.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/pair.hpp" #include "utilities/ticks.hpp" @@ -352,8 +352,8 @@ class G1Policy: public CHeapObj { // This must be called at the very beginning of an evacuation pause. void decide_on_concurrent_start_pause(); - uint young_list_desired_length() const { return Atomic::load(&_young_list_desired_length); } - uint young_list_target_length() const { return Atomic::load(&_young_list_target_length); } + uint young_list_desired_length() const { return AtomicAccess::load(&_young_list_desired_length); } + uint young_list_target_length() const { return AtomicAccess::load(&_young_list_target_length); } bool should_allocate_mutator_region() const; diff --git a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp index c89812ad08f28..45e262c440aac 100644 --- a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp +++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp @@ -24,7 +24,7 @@ #include "gc/g1/g1RedirtyCardsQueue.hpp" #include "gc/shared/bufferNode.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" @@ -132,7 +132,7 @@ void G1RedirtyCardsQueueSet::update_tail(BufferNode* node) { void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) { assert(_collecting, "precondition"); - Atomic::add(&_entry_count, node->size()); + AtomicAccess::add(&_entry_count, node->size()); _list.push(*node); update_tail(node); } @@ -141,7 +141,7 @@ void G1RedirtyCardsQueueSet::add_bufferlist(const BufferNodeList& buffers) { assert(_collecting, "precondition"); if (buffers._head != nullptr) { assert(buffers._tail != nullptr, "invariant"); - Atomic::add(&_entry_count, buffers._entry_count); + AtomicAccess::add(&_entry_count, buffers._entry_count); _list.prepend(*buffers._head, *buffers._tail); update_tail(buffers._tail); } diff --git a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp index d25da3e72b88d..6b0ebb34e0d9f 100644 --- a/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp +++ b/src/hotspot/share/gc/g1/g1RegionMarkStatsCache.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "gc/g1/g1RegionMarkStatsCache.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCache::find_for_add(uint region_idx) { uint const cache_idx = hash(region_idx); @@ -47,11 +47,11 @@ inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCac inline void G1RegionMarkStatsCache::evict(uint idx) { G1RegionMarkStatsCacheEntry* cur = &_cache[idx]; if (cur->_stats._live_words != 0) { - Atomic::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words); + AtomicAccess::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words); } if (cur->_stats._incoming_refs != 0) { - Atomic::add(&_target[cur->_region_idx]._incoming_refs, cur->_stats._incoming_refs); + AtomicAccess::add(&_target[cur->_region_idx]._incoming_refs, cur->_stats._incoming_refs); } cur->clear(); diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index 25790a00bd949..2a09512730cf7 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -51,7 +51,7 @@ #include "memory/resourceArea.hpp" #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" @@ -171,9 +171,9 @@ class G1RemSetScanState : public CHeapObj { return; } - bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false; + bool marked_as_dirty = AtomicAccess::cmpxchg(&_contains[region], false, true) == false; if (marked_as_dirty) { - uint allocated = Atomic::fetch_then_add(&_cur_idx, 1u); + uint allocated = AtomicAccess::fetch_then_add(&_cur_idx, 1u); _buffer[allocated] = region; } } @@ -238,7 +238,7 @@ class G1RemSetScanState : public CHeapObj { const uint num_regions_per_worker = num_cards_per_worker / (uint)G1HeapRegion::CardsPerRegion; while (_cur_dirty_regions < _regions->size()) { - uint next = Atomic::fetch_then_add(&_cur_dirty_regions, num_regions_per_worker); + uint next = AtomicAccess::fetch_then_add(&_cur_dirty_regions, num_regions_per_worker); uint max = MIN2(next + num_regions_per_worker, _regions->size()); for (uint i = next; i < max; i++) { @@ -397,7 +397,7 @@ class G1RemSetScanState : public CHeapObj { uint claim_cards_to_scan(uint region, uint increment) { assert(region < _max_reserved_regions, "Tried to access invalid region %u", region); - return Atomic::fetch_then_add(&_card_table_scan_state[region], increment, memory_order_relaxed); + return AtomicAccess::fetch_then_add(&_card_table_scan_state[region], increment, memory_order_relaxed); } void add_dirty_region(uint const region) { @@ -1334,7 +1334,7 @@ class G1MergeHeapRootsTask : public WorkerTask { if (_initial_evacuation && g1h->has_humongous_reclaim_candidates() && !_fast_reclaim_handled && - !Atomic::cmpxchg(&_fast_reclaim_handled, false, true)) { + !AtomicAccess::cmpxchg(&_fast_reclaim_handled, false, true)) { G1GCParPhaseTimesTracker subphase_x(p, G1GCPhaseTimes::MergeER, worker_id); diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index f73c4099ce623..44846fe0845c2 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -438,11 +438,11 @@ class G1PrepareEvacuationTask : public WorkerTask { 
} void add_humongous_candidates(uint candidates) { - Atomic::add(&_humongous_candidates, candidates); + AtomicAccess::add(&_humongous_candidates, candidates); } void add_humongous_total(uint total) { - Atomic::add(&_humongous_total, total); + AtomicAccess::add(&_humongous_total, total); } uint humongous_candidates() { @@ -679,7 +679,7 @@ class G1EvacuateRegionsBaseTask : public WorkerTask { G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id); pss->set_ref_discoverer(_g1h->ref_processor_stw()); - if (!Atomic::cmpxchg(&_pinned_regions_recorded, false, true)) { + if (!AtomicAccess::cmpxchg(&_pinned_regions_recorded, false, true)) { record_pinned_regions(pss, worker_id); } scan_roots(pss, worker_id); diff --git a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp index b13e7b8a62fde..5b13e8fc20642 100644 --- a/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp +++ b/src/hotspot/share/gc/g1/g1YoungGCPostEvacuateTasks.cpp @@ -596,12 +596,12 @@ class G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask : public G for (uint i = 0; i < _num_buffer_lists; i++) { uint index = (start + i) % _num_buffer_lists; - BufferNode* next = Atomic::load(&_rdc_buffers[index]._head); - BufferNode* tail = Atomic::load(&_rdc_buffers[index]._tail); + BufferNode* next = AtomicAccess::load(&_rdc_buffers[index]._head); + BufferNode* tail = AtomicAccess::load(&_rdc_buffers[index]._tail); while (next != nullptr) { BufferNode* node = next; - next = Atomic::cmpxchg(&_rdc_buffers[index]._head, node, (node != tail ) ? node->next() : nullptr); + next = AtomicAccess::cmpxchg(&_rdc_buffers[index]._head, node, (node != tail ) ? node->next() : nullptr); if (next == node) { cl.apply_to_buffer(node, worker_id); next = (node != tail ) ? node->next() : nullptr; @@ -869,7 +869,7 @@ class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1 virtual ~FreeCollectionSetTask() { Ticks serial_time = Ticks::now(); - bool has_new_retained_regions = Atomic::load(&_num_retained_regions) != 0; + bool has_new_retained_regions = AtomicAccess::load(&_num_retained_regions) != 0; if (has_new_retained_regions) { G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates(); candidates->sort_by_efficiency(); @@ -904,7 +904,7 @@ class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1 // Report per-region type timings. cl.report_timing(); - Atomic::add(&_num_retained_regions, cl.num_retained_regions(), memory_order_relaxed); + AtomicAccess::add(&_num_retained_regions, cl.num_retained_regions(), memory_order_relaxed); } }; diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp index d07a989c3a698..08569fd5bd14f 100644 --- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp @@ -30,7 +30,7 @@ #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/java.hpp" #include "runtime/javaThread.hpp" #include "runtime/os.inline.hpp" @@ -571,7 +571,7 @@ HeapWord* MutableNUMASpace::cas_allocate(size_t size) { if (p != nullptr) { HeapWord* cur_top, *cur_chunk_top = p + size; while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated. 
- if (Atomic::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) { + if (AtomicAccess::cmpxchg(top_addr(), cur_top, cur_chunk_top) == cur_top) { break; } } diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp index 498fb12511c36..71fddf2c4dad9 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp @@ -28,7 +28,7 @@ #include "memory/iterator.inline.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/safepoint.hpp" #include "utilities/align.hpp" @@ -130,7 +130,7 @@ void MutableSpace::initialize(MemRegion mr, // makes the new space available for allocation by other threads. So this // assignment must follow all other configuration and initialization that // might be done for expansion. - Atomic::release_store(end_addr(), mr.end()); + AtomicAccess::release_store(end_addr(), mr.end()); if (clear_space) { clear(mangle_space); @@ -162,10 +162,10 @@ HeapWord* MutableSpace::cas_allocate(size_t size) { // If end is read first, other threads may advance end and top such that // current top > old end and current top + size > current end. Then // pointer_delta underflows, allowing installation of top > current end. - HeapWord* obj = Atomic::load_acquire(top_addr()); + HeapWord* obj = AtomicAccess::load_acquire(top_addr()); if (pointer_delta(end(), obj) >= size) { HeapWord* new_top = obj + size; - HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top); + HeapWord* result = AtomicAccess::cmpxchg(top_addr(), obj, new_top); // result can be one of two: // the old top value: the exchange succeeded // otherwise: the new value of the top is returned. @@ -184,7 +184,7 @@ HeapWord* MutableSpace::cas_allocate(size_t size) { // Try to deallocate previous allocation. Returns true upon success. bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) { HeapWord* expected_top = obj + size; - return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top; + return AtomicAccess::cmpxchg(top_addr(), expected_top, obj) == expected_top; } // Only used by oldgen allocation. diff --git a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp index d2d168cc2c0db..c613c8615f0e4 100644 --- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp +++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp @@ -28,7 +28,7 @@ #include "memory/memoryReserver.hpp" #include "nmt/memTracker.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" #include "utilities/bitMap.inline.hpp" diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index c185b8c44371e..21841330fa7b8 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -512,7 +512,7 @@ class HeapBlockClaimer : public StackObj { // Claim the block and get the block index. 
size_t claim_and_get_block() { size_t block_index; - block_index = Atomic::fetch_then_add(&_claimed_index, 1u); + block_index = AtomicAccess::fetch_then_add(&_claimed_index, 1u); PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen(); size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims; diff --git a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp index 1e4bd6c286843..7515031f01fa1 100644 --- a/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp +++ b/src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.cpp @@ -87,7 +87,7 @@ size_t PSAdaptiveSizePolicy::compute_desired_eden_size(bool is_survivor_overflow double min_gc_distance = MinGCDistanceSecond; // Get a local copy and use it inside gc-pause in case the global var gets updated externally. - const uint local_GCTimeRatio = Atomic::load(&GCTimeRatio); + const uint local_GCTimeRatio = AtomicAccess::load(&GCTimeRatio); const double throughput_goal = calculate_throughput_goal(local_GCTimeRatio); if (mutator_time_percent() < throughput_goal) { diff --git a/src/hotspot/share/gc/parallel/psCardTable.cpp b/src/hotspot/share/gc/parallel/psCardTable.cpp index 22a38d816f639..f35418bc8f341 100644 --- a/src/hotspot/share/gc/parallel/psCardTable.cpp +++ b/src/hotspot/share/gc/parallel/psCardTable.cpp @@ -383,9 +383,9 @@ void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array, preprocess_card_table_parallel(object_start, old_gen_bottom, old_gen_top, stripe_index, n_stripes); // Sync with other workers. - Atomic::dec(&_preprocessing_active_workers); + AtomicAccess::dec(&_preprocessing_active_workers); SpinYield spin_yield; - while (Atomic::load_acquire(&_preprocessing_active_workers) > 0) { + while (AtomicAccess::load_acquire(&_preprocessing_active_workers) > 0) { spin_yield.wait(); } diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index bac536234b694..e63e189861f5c 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -81,7 +81,7 @@ #include "oops/methodData.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/safepoint.hpp" @@ -1298,7 +1298,7 @@ void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_ const size_t stripe_size = num_regions_per_stripe * region_size; while (true) { - uint counter = Atomic::fetch_then_add(claim_counter, num_regions_per_stripe); + uint counter = AtomicAccess::fetch_then_add(claim_counter, num_regions_per_stripe); HeapWord* cur_stripe = bottom + counter * region_size; if (cur_stripe >= top) { break; diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp index a28df24830cd7..d5ed641f48534 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp @@ -34,7 +34,7 @@ #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/taskTerminator.hpp" #include "oops/oop.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" class ParallelScavengeHeap; @@ -443,7 +443,7 @@ inline void ParallelCompactData::RegionData::decrement_destination_count() { assert(_dc_and_los < dc_claimed, "already claimed"); 
assert(_dc_and_los >= dc_one, "count would go negative"); - Atomic::add(&_dc_and_los, dc_mask); + AtomicAccess::add(&_dc_and_los, dc_mask); } inline void ParallelCompactData::RegionData::set_completed() @@ -466,36 +466,36 @@ inline bool ParallelCompactData::RegionData::claim_unsafe() inline void ParallelCompactData::RegionData::add_live_obj(size_t words) { assert(words <= (size_t)los_mask - live_obj_size(), "overflow"); - Atomic::add(&_dc_and_los, static_cast<region_sz_t>(words)); + AtomicAccess::add(&_dc_and_los, static_cast<region_sz_t>(words)); } inline bool ParallelCompactData::RegionData::claim() { const region_sz_t los = static_cast<region_sz_t>(live_obj_size()); - const region_sz_t old = Atomic::cmpxchg(&_dc_and_los, los, dc_claimed | los); + const region_sz_t old = AtomicAccess::cmpxchg(&_dc_and_los, los, dc_claimed | los); return old == los; } inline bool ParallelCompactData::RegionData::mark_normal() { - return Atomic::cmpxchg(&_shadow_state, UnusedRegion, NormalRegion) == UnusedRegion; + return AtomicAccess::cmpxchg(&_shadow_state, UnusedRegion, NormalRegion) == UnusedRegion; } inline bool ParallelCompactData::RegionData::mark_shadow() { if (_shadow_state != UnusedRegion) return false; - return Atomic::cmpxchg(&_shadow_state, UnusedRegion, ShadowRegion) == UnusedRegion; + return AtomicAccess::cmpxchg(&_shadow_state, UnusedRegion, ShadowRegion) == UnusedRegion; } inline void ParallelCompactData::RegionData::mark_filled() { - int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, FilledShadow); + int old = AtomicAccess::cmpxchg(&_shadow_state, ShadowRegion, FilledShadow); assert(old == ShadowRegion, "Fail to mark the region as filled"); } inline bool ParallelCompactData::RegionData::mark_copied() { - return Atomic::cmpxchg(&_shadow_state, FilledShadow, CopiedShadow) == FilledShadow; + return AtomicAccess::cmpxchg(&_shadow_state, FilledShadow, CopiedShadow) == FilledShadow; } void ParallelCompactData::RegionData::shadow_to_normal() { - int old = Atomic::cmpxchg(&_shadow_state, ShadowRegion, NormalRegion); + int old = AtomicAccess::cmpxchg(&_shadow_state, ShadowRegion, NormalRegion); assert(old == ShadowRegion, "Fail to mark the region as finish"); } diff --git a/src/hotspot/share/gc/parallel/spaceCounters.cpp b/src/hotspot/share/gc/parallel/spaceCounters.cpp index c15d25dc0aab7..8f24373abcb57 100644 --- a/src/hotspot/share/gc/parallel/spaceCounters.cpp +++ b/src/hotspot/share/gc/parallel/spaceCounters.cpp @@ -25,7 +25,7 @@ #include "gc/parallel/spaceCounters.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/debug.hpp" diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp index 0e5c5d02d1ca3..cb5d6b5a88698 100644 --- a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp +++ b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp @@ -197,7 +197,7 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) { // a very rare event.
if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) { static volatile uint32_t counter=0; - if (Atomic::add(&counter, 1u) % 10 == 0) { + if (AtomicAccess::add(&counter, 1u) % 10 == 0) { may_enter = false; } } diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp index dd0690d44fb63..3d87cb2e69dfb 100644 --- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp +++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/cardTable.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" template inline void CardTableBarrierSet::write_ref_field_post(T* field) { diff --git a/src/hotspot/share/gc/shared/concurrentGCThread.cpp b/src/hotspot/share/gc/shared/concurrentGCThread.cpp index 7d0cecde528bf..ac281f82f8acf 100644 --- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp +++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp @@ -23,7 +23,7 @@ */ #include "gc/shared/concurrentGCThread.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/init.hpp" #include "runtime/jniHandles.hpp" #include "runtime/mutexLocker.hpp" @@ -48,7 +48,7 @@ void ConcurrentGCThread::run() { // Signal thread has terminated MonitorLocker ml(Terminator_lock); - Atomic::release_store(&_has_terminated, true); + AtomicAccess::release_store(&_has_terminated, true); ml.notify_all(); } @@ -57,7 +57,7 @@ void ConcurrentGCThread::stop() { assert(!has_terminated(), "Invalid state"); // Signal thread to terminate - Atomic::release_store_fence(&_should_terminate, true); + AtomicAccess::release_store_fence(&_should_terminate, true); stop_service(); @@ -69,9 +69,9 @@ void ConcurrentGCThread::stop() { } bool ConcurrentGCThread::should_terminate() const { - return Atomic::load_acquire(&_should_terminate); + return AtomicAccess::load_acquire(&_should_terminate); } bool ConcurrentGCThread::has_terminated() const { - return Atomic::load_acquire(&_has_terminated); + return AtomicAccess::load_acquire(&_has_terminated); } diff --git a/src/hotspot/share/gc/shared/freeListAllocator.cpp b/src/hotspot/share/gc/shared/freeListAllocator.cpp index 27f1cd8aeb373..c6801c2be18c7 100644 --- a/src/hotspot/share/gc/shared/freeListAllocator.cpp +++ b/src/hotspot/share/gc/shared/freeListAllocator.cpp @@ -41,26 +41,26 @@ FreeListAllocator::PendingList::PendingList() : size_t FreeListAllocator::PendingList::add(FreeNode* node) { assert(node->next() == nullptr, "precondition"); - FreeNode* old_head = Atomic::xchg(&_head, node); + FreeNode* old_head = AtomicAccess::xchg(&_head, node); if (old_head != nullptr) { node->set_next(old_head); } else { assert(_tail == nullptr, "invariant"); _tail = node; } - return Atomic::add(&_count, size_t(1)); + return AtomicAccess::add(&_count, size_t(1)); } typename FreeListAllocator::NodeList FreeListAllocator::PendingList::take_all() { - NodeList result{Atomic::load(&_head), _tail, Atomic::load(&_count)}; - Atomic::store(&_head, (FreeNode*)nullptr); + NodeList result{AtomicAccess::load(&_head), _tail, AtomicAccess::load(&_count)}; + AtomicAccess::store(&_head, (FreeNode*)nullptr); 
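The ConcurrentGCThread hunk above pairs a release store of a flag with acquire loads in should_terminate()/has_terminated(). A minimal sketch of that handshake, using std::atomic rather than AtomicAccess and omitting the monitor wait/notify the real code also uses; names are illustrative.

#include <atomic>

// Controller/worker termination flags: the release store publishes all prior
// writes, the acquire load on the other side makes them visible.
class StoppableWorker {
  std::atomic<bool> _should_terminate{false};
  std::atomic<bool> _has_terminated{false};

public:
  // Controller side: request termination, then poll for completion.
  void request_stop()         { _should_terminate.store(true, std::memory_order_release); }
  bool has_terminated() const { return _has_terminated.load(std::memory_order_acquire); }

  // Worker side: poll the request flag, publish termination when done.
  bool should_terminate() const { return _should_terminate.load(std::memory_order_acquire); }
  void run() {
    while (!should_terminate()) {
      // ... one unit of background work ...
    }
    _has_terminated.store(true, std::memory_order_release);
  }
};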
_tail = nullptr; - Atomic::store(&_count, size_t(0)); + AtomicAccess::store(&_count, size_t(0)); return result; } size_t FreeListAllocator::PendingList::count() const { - return Atomic::load(&_count); + return AtomicAccess::load(&_count); } FreeListAllocator::FreeListAllocator(const char* name, FreeListConfig* config) : @@ -85,7 +85,7 @@ void FreeListAllocator::delete_list(FreeNode* list) { } FreeListAllocator::~FreeListAllocator() { - uint index = Atomic::load(&_active_pending_list); + uint index = AtomicAccess::load(&_active_pending_list); NodeList pending_list = _pending_lists[index].take_all(); delete_list(pending_list._head); delete_list(_free_list.pop_all()); @@ -93,18 +93,18 @@ FreeListAllocator::~FreeListAllocator() { // Drop existing nodes and reset all counters void FreeListAllocator::reset() { - uint index = Atomic::load(&_active_pending_list); + uint index = AtomicAccess::load(&_active_pending_list); _pending_lists[index].take_all(); _free_list.pop_all(); _free_count = 0; } size_t FreeListAllocator::free_count() const { - return Atomic::load(&_free_count); + return AtomicAccess::load(&_free_count); } size_t FreeListAllocator::pending_count() const { - uint index = Atomic::load(&_active_pending_list); + uint index = AtomicAccess::load(&_active_pending_list); return _pending_lists[index].count(); } @@ -124,7 +124,7 @@ void* FreeListAllocator::allocate() { // Decrement count after getting buffer from free list. This, along // with incrementing count before adding to free list, ensures count // never underflows. - size_t count = Atomic::sub(&_free_count, 1u); + size_t count = AtomicAccess::sub(&_free_count, 1u); assert((count + 1) != 0, "_free_count underflow"); return node; } else { @@ -149,7 +149,7 @@ void FreeListAllocator::release(void* free_node) { // we're done with what might be the pending list to be transferred. { GlobalCounter::CriticalSection cs(Thread::current()); - uint index = Atomic::load_acquire(&_active_pending_list); + uint index = AtomicAccess::load_acquire(&_active_pending_list); size_t count = _pending_lists[index].add(node); if (count <= _config->transfer_threshold()) return; } @@ -164,17 +164,17 @@ void FreeListAllocator::release(void* free_node) { // in-progress transfer. bool FreeListAllocator::try_transfer_pending() { // Attempt to claim the lock. - if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail. - Atomic::cmpxchg(&_transfer_lock, false, true)) { + if (AtomicAccess::load(&_transfer_lock) || // Skip CAS if likely to fail. + AtomicAccess::cmpxchg(&_transfer_lock, false, true)) { return false; } // Have the lock; perform the transfer. // Change which pending list is active. Don't need an atomic RMW since // we have the lock and we're the only writer. - uint index = Atomic::load(&_active_pending_list); + uint index = AtomicAccess::load(&_active_pending_list); uint new_active = (index + 1) % ARRAY_SIZE(_pending_lists); - Atomic::release_store(&_active_pending_list, new_active); + AtomicAccess::release_store(&_active_pending_list, new_active); // Wait for all critical sections in the buffer life-cycle to complete. // This includes _free_list pops and adding to the now inactive pending @@ -186,11 +186,11 @@ bool FreeListAllocator::try_transfer_pending() { size_t count = transfer_list._entry_count; if (count > 0) { // Update count first so no underflow in allocate(). 
- Atomic::add(&_free_count, count); + AtomicAccess::add(&_free_count, count); _free_list.prepend(*transfer_list._head, *transfer_list._tail); log_trace(gc, freelist) ("Transferred %s pending to free: %zu", name(), count); } - Atomic::release_store(&_transfer_lock, false); + AtomicAccess::release_store(&_transfer_lock, false); return true; } diff --git a/src/hotspot/share/gc/shared/freeListAllocator.hpp b/src/hotspot/share/gc/shared/freeListAllocator.hpp index 94a9092295264..07e075a67254f 100644 --- a/src/hotspot/share/gc/shared/freeListAllocator.hpp +++ b/src/hotspot/share/gc/shared/freeListAllocator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "memory/allocation.hpp" #include "memory/padded.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/lockFreeStack.hpp" @@ -66,11 +66,11 @@ class FreeListAllocator { FreeNode() : _next (nullptr) { } - FreeNode* next() { return Atomic::load(&_next); } + FreeNode* next() { return AtomicAccess::load(&_next); } FreeNode* volatile* next_addr() { return &_next; } - void set_next(FreeNode* next) { Atomic::store(&_next, next); } + void set_next(FreeNode* next) { AtomicAccess::store(&_next, next); } }; struct NodeList { diff --git a/src/hotspot/share/gc/shared/gcLocker.cpp b/src/hotspot/share/gc/shared/gcLocker.cpp index fbc953512bb28..01d17b1117d03 100644 --- a/src/hotspot/share/gc/shared/gcLocker.cpp +++ b/src/hotspot/share/gc/shared/gcLocker.cpp @@ -28,7 +28,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/safepoint.hpp" @@ -84,11 +84,11 @@ bool GCLocker::is_active() { void GCLocker::block() { // _lock is held from the beginning of block() to the end of of unblock(). _lock->lock(); - assert(Atomic::load(&_is_gc_request_pending) == false, "precondition"); + assert(AtomicAccess::load(&_is_gc_request_pending) == false, "precondition"); GCLockerTimingDebugLogger logger("Thread blocked to start GC."); - Atomic::store(&_is_gc_request_pending, true); + AtomicAccess::store(&_is_gc_request_pending, true); // The _is_gc_request_pending and _jni_active_critical (inside // in_critical_atomic()) variables form a Dekker duality. On the GC side, the @@ -112,14 +112,14 @@ void GCLocker::block() { #ifdef ASSERT // Matching the storestore in GCLocker::exit. OrderAccess::loadload(); - assert(Atomic::load(&_verify_in_cr_count) == 0, "inv"); + assert(AtomicAccess::load(&_verify_in_cr_count) == 0, "inv"); #endif } void GCLocker::unblock() { - assert(Atomic::load(&_is_gc_request_pending) == true, "precondition"); + assert(AtomicAccess::load(&_is_gc_request_pending) == true, "precondition"); - Atomic::store(&_is_gc_request_pending, false); + AtomicAccess::store(&_is_gc_request_pending, false); _lock->unlock(); } @@ -139,7 +139,7 @@ void GCLocker::enter_slow(JavaThread* current_thread) { // Same as fast path. 
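The GCLocker::block comment above describes a Dekker-style duality between the pending-GC flag and the critical-section count. A simplified sketch of that pairing, with sequentially consistent std::atomic operations standing in for the explicit fences in HotSpot; the locking and blocking slow paths are omitted and all names are illustrative.

#include <atomic>

std::atomic<bool> gc_request_pending{false};
std::atomic<int>  threads_in_critical{0};

// GC side: announce the request first, then check for threads already in a
// critical section; if any are present the GC must wait for them to leave.
bool gc_must_wait() {
  gc_request_pending.store(true, std::memory_order_seq_cst);
  return threads_in_critical.load(std::memory_order_seq_cst) > 0;
}

// Mutator side: enter the critical section first, then re-check the pending
// flag and back out (to a slow path) if a GC request arrived in between.
bool try_enter_critical() {
  threads_in_critical.fetch_add(1, std::memory_order_seq_cst);
  if (gc_request_pending.load(std::memory_order_seq_cst)) {
    threads_in_critical.fetch_sub(1, std::memory_order_seq_cst);
    return false;
  }
  return true;
}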
OrderAccess::fence(); - if (!Atomic::load(&_is_gc_request_pending)) { + if (!AtomicAccess::load(&_is_gc_request_pending)) { return; } diff --git a/src/hotspot/share/gc/shared/gcLocker.inline.hpp b/src/hotspot/share/gc/shared/gcLocker.inline.hpp index 357b788ce52d0..050b957028047 100644 --- a/src/hotspot/share/gc/shared/gcLocker.inline.hpp +++ b/src/hotspot/share/gc/shared/gcLocker.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -38,13 +38,13 @@ void GCLocker::enter(JavaThread* current_thread) { // Matching the fence in GCLocker::block. OrderAccess::fence(); - if (Atomic::load(&_is_gc_request_pending)) { + if (AtomicAccess::load(&_is_gc_request_pending)) { current_thread->exit_critical(); // slow-path enter_slow(current_thread); } - DEBUG_ONLY(Atomic::add(&_verify_in_cr_count, (uint64_t)1);) + DEBUG_ONLY(AtomicAccess::add(&_verify_in_cr_count, (uint64_t)1);) } else { current_thread->enter_critical(); } @@ -55,7 +55,7 @@ void GCLocker::exit(JavaThread* current_thread) { #ifdef ASSERT if (current_thread->in_last_critical()) { - Atomic::add(&_verify_in_cr_count, (uint64_t)-1); + AtomicAccess::add(&_verify_in_cr_count, (uint64_t)-1); // Matching the loadload in GCLocker::block. OrderAccess::storestore(); } diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp index d36e9850bdabb..d52efc13dacaf 100644 --- a/src/hotspot/share/gc/shared/oopStorage.cpp +++ b/src/hotspot/share/gc/shared/oopStorage.cpp @@ -28,7 +28,7 @@ #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" #include "nmt/memTracker.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -148,16 +148,16 @@ size_t OopStorage::ActiveArray::block_count() const { } size_t OopStorage::ActiveArray::block_count_acquire() const { - return Atomic::load_acquire(&_block_count); + return AtomicAccess::load_acquire(&_block_count); } void OopStorage::ActiveArray::increment_refcount() const { - int new_value = Atomic::add(&_refcount, 1); + int new_value = AtomicAccess::add(&_refcount, 1); assert(new_value >= 1, "negative refcount %d", new_value - 1); } bool OopStorage::ActiveArray::decrement_refcount() const { - int new_value = Atomic::sub(&_refcount, 1); + int new_value = AtomicAccess::sub(&_refcount, 1); assert(new_value >= 0, "negative refcount %d", new_value); return new_value == 0; } @@ -169,7 +169,7 @@ bool OopStorage::ActiveArray::push(Block* block) { *block_ptr(index) = block; // Use a release_store to ensure all the setup is complete before // making the block visible. 
- Atomic::release_store(&_block_count, index + 1); + AtomicAccess::release_store(&_block_count, index + 1); return true; } else { return false; @@ -272,8 +272,8 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const { bool OopStorage::Block::is_safe_to_delete() const { assert(is_empty(), "precondition"); OrderAccess::loadload(); - return (Atomic::load_acquire(&_release_refcount) == 0) && - (Atomic::load_acquire(&_deferred_updates_next) == nullptr); + return (AtomicAccess::load_acquire(&_release_refcount) == 0) && + (AtomicAccess::load_acquire(&_deferred_updates_next) == nullptr); } OopStorage::Block* OopStorage::Block::deferred_updates_next() const { @@ -321,7 +321,7 @@ void OopStorage::Block::atomic_add_allocated(uintx add) { // we can use an atomic add to implement the operation. The assert post // facto verifies the precondition held; if there were any set bits in // common, then after the add at least one of them will be zero. - uintx sum = Atomic::add(&_allocated_bitmask, add); + uintx sum = AtomicAccess::add(&_allocated_bitmask, add); assert((sum & add) == add, "some already present: %zu:%zu", sum, add); } @@ -452,7 +452,7 @@ oop* OopStorage::allocate() { oop* result = block->allocate(); assert(result != nullptr, "allocation failed"); assert(!block->is_empty(), "postcondition"); - Atomic::inc(&_allocation_count); // release updates outside lock. + AtomicAccess::inc(&_allocation_count); // release updates outside lock. if (block->is_full()) { // Transitioning from not full to full. // Remove full blocks from consideration by future allocates. @@ -490,7 +490,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) { assert(!is_empty_bitmask(taken), "invariant"); } // Drop lock, now that we've taken all available entries from block. size_t num_taken = population_count(taken); - Atomic::add(&_allocation_count, num_taken); + AtomicAccess::add(&_allocation_count, num_taken); // Fill ptrs from those taken entries. size_t limit = MIN2(num_taken, size); for (size_t i = 0; i < limit; ++i) { @@ -506,7 +506,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) { assert(size == limit, "invariant"); assert(num_taken == (limit + population_count(taken)), "invariant"); block->release_entries(taken, this); - Atomic::sub(&_allocation_count, num_taken - limit); + AtomicAccess::sub(&_allocation_count, num_taken - limit); } log_trace(oopstorage, ref)("%s: bulk allocate %zu, returned %zu", name(), limit, num_taken - limit); @@ -599,7 +599,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) { // Update new_array refcount to account for the new reference. new_array->increment_refcount(); // Install new_array, ensuring its initialization is complete first. - Atomic::release_store(&_active_array, new_array); + AtomicAccess::release_store(&_active_array, new_array); // Wait for any readers that could read the old array from _active_array. // Can't use GlobalCounter here, because this is called from allocate(), // which may be called in the scope of a GlobalCounter critical section @@ -617,7 +617,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) { // using it. 
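Block::atomic_add_allocated above leans on the claimed bits being disjoint from the bitmask, so an atomic add doubles as an atomic or, with the precondition checked after the fact. A sketch of that trick under the same assumption; not HotSpot code, names are illustrative.

#include <atomic>
#include <cassert>
#include <cstdint>

std::atomic<std::uint64_t> allocated_bitmask{0};

void atomic_add_allocated(std::uint64_t add) {
  // Precondition: none of the bits in 'add' are currently set in the bitmask.
  std::uint64_t old = allocated_bitmask.fetch_add(add, std::memory_order_relaxed);
  std::uint64_t sum = old + add;
  // If any bit in 'add' had already been set, the addition would have carried
  // out of it and that bit of 'sum' would now be zero, failing the check.
  assert((sum & add) == add && "some bits were already present");
  (void)sum;  // only used by the assert
}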
OopStorage::ActiveArray* OopStorage::obtain_active_array() const { SingleWriterSynchronizer::CriticalSection cs(&_protect_active); - ActiveArray* result = Atomic::load_acquire(&_active_array); + ActiveArray* result = AtomicAccess::load_acquire(&_active_array); result->increment_refcount(); return result; } @@ -672,14 +672,14 @@ static void log_release_transitions(uintx releasing, void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) { assert(releasing != 0, "preconditon"); // Prevent empty block deletion when transitioning to empty. - Atomic::inc(&_release_refcount); + AtomicAccess::inc(&_release_refcount); // Atomically update allocated bitmask. uintx old_allocated = _allocated_bitmask; while (true) { assert((releasing & ~old_allocated) == 0, "releasing unallocated entries"); uintx new_value = old_allocated ^ releasing; - uintx fetched = Atomic::cmpxchg(&_allocated_bitmask, old_allocated, new_value); + uintx fetched = AtomicAccess::cmpxchg(&_allocated_bitmask, old_allocated, new_value); if (fetched == old_allocated) break; // Successful update. old_allocated = fetched; // Retry with updated bitmask. } @@ -698,12 +698,12 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) { // then someone else has made such a claim and the deferred update has not // yet been processed and will include our change, so we don't need to do // anything further. - if (Atomic::replace_if_null(&_deferred_updates_next, this)) { + if (AtomicAccess::replace_if_null(&_deferred_updates_next, this)) { // Successfully claimed. Push, with self-loop for end-of-list. Block* head = owner->_deferred_updates; while (true) { _deferred_updates_next = (head == nullptr) ? this : head; - Block* fetched = Atomic::cmpxchg(&owner->_deferred_updates, head, this); + Block* fetched = AtomicAccess::cmpxchg(&owner->_deferred_updates, head, this); if (fetched == head) break; // Successful update. head = fetched; // Retry with updated head. } @@ -720,7 +720,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) { } } // Release hold on empty block deletion. - Atomic::dec(&_release_refcount); + AtomicAccess::dec(&_release_refcount); } // Process one available deferred update. Returns true if one was processed. @@ -729,13 +729,13 @@ bool OopStorage::reduce_deferred_updates() { // Atomically pop a block off the list, if any available. // No ABA issue because this is only called by one thread at a time. // The atomicity is wrto pushes by release(). - Block* block = Atomic::load_acquire(&_deferred_updates); + Block* block = AtomicAccess::load_acquire(&_deferred_updates); while (true) { if (block == nullptr) return false; // Try atomic pop of block from list. Block* tail = block->deferred_updates_next(); if (block == tail) tail = nullptr; // Handle self-loop end marker. - Block* fetched = Atomic::cmpxchg(&_deferred_updates, block, tail); + Block* fetched = AtomicAccess::cmpxchg(&_deferred_updates, block, tail); if (fetched == block) break; // Update successful. block = fetched; // Retry with updated block. 
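Block::release_entries above clears released bits with a compare-exchange retry loop that recomputes from whatever value it actually found. A minimal sketch of that loop, with std::atomic in place of AtomicAccess and illustrative names.

#include <atomic>
#include <cassert>
#include <cstdint>

std::atomic<std::uint64_t> block_bitmask{~std::uint64_t(0)};

void release_entries(std::uint64_t releasing) {
  std::uint64_t old_allocated = block_bitmask.load(std::memory_order_relaxed);
  while (true) {
    assert((releasing & ~old_allocated) == 0 && "releasing unallocated entries");
    std::uint64_t new_value = old_allocated ^ releasing;   // clear released bits
    // On failure, compare_exchange_weak stores the value it actually observed
    // into old_allocated, so the next iteration recomputes from fresh state.
    if (block_bitmask.compare_exchange_weak(old_allocated, new_value,
                                            std::memory_order_release,
                                            std::memory_order_relaxed)) {
      break;
    }
  }
}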
} @@ -780,7 +780,7 @@ void OopStorage::release(const oop* ptr) { assert(block != nullptr, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr)); log_trace(oopstorage, ref)("%s: releasing " PTR_FORMAT, name(), p2i(ptr)); block->release_entries(block->bitmask_for_entry(ptr), this); - Atomic::dec(&_allocation_count); + AtomicAccess::dec(&_allocation_count); } void OopStorage::release(const oop* const* ptrs, size_t size) { @@ -806,7 +806,7 @@ void OopStorage::release(const oop* const* ptrs, size_t size) { } // Release the contiguous entries that are in block. block->release_entries(releasing, this); - Atomic::sub(&_allocation_count, count); + AtomicAccess::sub(&_allocation_count, count); } } @@ -906,12 +906,12 @@ const jlong cleanup_defer_period = 500 * NANOSECS_PER_MILLISEC; bool OopStorage::has_cleanup_work_and_reset() { assert_lock_strong(Service_lock); - if (Atomic::load_acquire(&needs_cleanup_requested) && + if (AtomicAccess::load_acquire(&needs_cleanup_requested) && os::javaTimeNanos() > cleanup_permit_time) { cleanup_permit_time = os::javaTimeNanos() + cleanup_defer_period; // Set the request flag false and return its old value. - Atomic::release_store(&needs_cleanup_requested, false); + AtomicAccess::release_store(&needs_cleanup_requested, false); return true; } else { return false; @@ -923,22 +923,22 @@ bool OopStorage::has_cleanup_work_and_reset() { void OopStorage::record_needs_cleanup() { // Set local flag first, else ServiceThread could wake up and miss // the request. - Atomic::release_store(&_needs_cleanup, true); - Atomic::release_store_fence(&needs_cleanup_requested, true); + AtomicAccess::release_store(&_needs_cleanup, true); + AtomicAccess::release_store_fence(&needs_cleanup_requested, true); } bool OopStorage::delete_empty_blocks() { // ServiceThread might have oopstorage work, but not for this object. // But check for deferred updates, which might provide cleanup work. - if (!Atomic::load_acquire(&_needs_cleanup) && - (Atomic::load_acquire(&_deferred_updates) == nullptr)) { + if (!AtomicAccess::load_acquire(&_needs_cleanup) && + (AtomicAccess::load_acquire(&_deferred_updates) == nullptr)) { return false; } MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag); // Clear the request before processing. - Atomic::release_store_fence(&_needs_cleanup, false); + AtomicAccess::release_store_fence(&_needs_cleanup, false); // Other threads could be adding to the empty block count or the // deferred update list while we're working. Set an upper bound on @@ -1084,7 +1084,7 @@ void OopStorage::BasicParState::update_concurrent_iteration_count(int value) { bool OopStorage::BasicParState::claim_next_segment(IterationData* data) { data->_processed += data->_segment_end - data->_segment_start; - size_t start = Atomic::load_acquire(&_next_block); + size_t start = AtomicAccess::load_acquire(&_next_block); if (start >= _block_count) { return finish_iteration(data); // No more blocks available. } @@ -1097,11 +1097,11 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) { size_t max_step = 10; size_t remaining = _block_count - start; size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count)); - // Atomic::add with possible overshoot. This can perform better + // AtomicAccess::add with possible overshoot. This can perform better // than a CAS loop on some platforms when there is contention. // We can cope with the uncertainty by recomputing start/end from // the result of the add, and dealing with potential overshoot. 
- size_t end = Atomic::add(&_next_block, step); + size_t end = AtomicAccess::add(&_next_block, step); // _next_block may have changed, so recompute start from result of add. start = end - step; // _next_block may have changed so much that end has overshot. @@ -1128,15 +1128,15 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons } size_t OopStorage::BasicParState::num_dead() const { - return Atomic::load(&_num_dead); + return AtomicAccess::load(&_num_dead); } void OopStorage::BasicParState::increment_num_dead(size_t num_dead) { - Atomic::add(&_num_dead, num_dead); + AtomicAccess::add(&_num_dead, num_dead); } void OopStorage::BasicParState::report_num_dead() const { - _storage->report_num_dead(Atomic::load(&_num_dead)); + _storage->report_num_dead(AtomicAccess::load(&_num_dead)); } const char* OopStorage::name() const { return _name; } diff --git a/src/hotspot/share/gc/shared/oopStorageSetParState.inline.hpp b/src/hotspot/share/gc/shared/oopStorageSetParState.inline.hpp index 7aa63606c7795..8e220e745e591 100644 --- a/src/hotspot/share/gc/shared/oopStorageSetParState.inline.hpp +++ b/src/hotspot/share/gc/shared/oopStorageSetParState.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "gc/shared/oopStorageSet.hpp" #include "memory/iterator.hpp" #include "oops/access.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" template diff --git a/src/hotspot/share/gc/shared/parallelCleaning.cpp b/src/hotspot/share/gc/shared/parallelCleaning.cpp index 0334230eb4eba..9d496783ca266 100644 --- a/src/hotspot/share/gc/shared/parallelCleaning.cpp +++ b/src/hotspot/share/gc/shared/parallelCleaning.cpp @@ -28,7 +28,7 @@ #include "gc/shared/parallelCleaning.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" CodeCacheUnloadingTask::CodeCacheUnloadingTask(uint num_workers, bool unloading_occurred) : _unloading_occurred(unloading_occurred), @@ -68,7 +68,7 @@ void CodeCacheUnloadingTask::claim_nmethods(nmethod** claimed_nmethods, int *num } } - } while (Atomic::cmpxchg(&_claimed_nmethod, first, last.method()) != first); + } while (AtomicAccess::cmpxchg(&_claimed_nmethod, first, last.method()) != first); } void CodeCacheUnloadingTask::work(uint worker_id) { @@ -104,7 +104,7 @@ bool KlassCleaningTask::claim_clean_klass_tree_task() { return false; } - return !Atomic::cmpxchg(&_clean_klass_tree_claimed, false, true); + return !AtomicAccess::cmpxchg(&_clean_klass_tree_claimed, false, true); } InstanceKlass* KlassCleaningTask::claim_next_klass() { diff --git a/src/hotspot/share/gc/shared/partialArrayState.cpp b/src/hotspot/share/gc/shared/partialArrayState.cpp index 8db39281a05ec..f913f3db4ba76 100644 --- a/src/hotspot/share/gc/shared/partialArrayState.cpp +++ b/src/hotspot/share/gc/shared/partialArrayState.cpp @@ -27,7 +27,7 @@ #include "memory/arena.hpp" #include "nmt/memTag.hpp" #include "oops/oopsHierarchy.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -48,7 +48,7 @@ PartialArrayState::PartialArrayState(oop src, 
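The claim_next_segment comment above describes claiming with a single atomic add that may overshoot, then recomputing the start and clamping the end instead of looping on a CAS. A sketch of that scheme follows; illustrative only, with std::atomic replacing AtomicAccess.

#include <algorithm>
#include <atomic>
#include <cstddef>

struct SegmentClaimer {
  std::atomic<std::size_t> _next_block{0};
  std::size_t _block_count;

  // Returns false when nothing is left; otherwise [start, end) is claimed.
  bool claim(std::size_t step, std::size_t& start, std::size_t& end) {
    std::size_t new_next = _next_block.fetch_add(step, std::memory_order_relaxed) + step;
    start = new_next - step;                   // what this thread claimed from
    if (start >= _block_count) {
      return false;                            // everything was already claimed
    }
    end = std::min(new_next, _block_count);    // deal with potential overshoot
    return true;
  }
};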
oop dst, } void PartialArrayState::add_references(size_t count) { - size_t new_count = Atomic::add(&_refcount, count, memory_order_relaxed); + size_t new_count = AtomicAccess::add(&_refcount, count, memory_order_relaxed); assert(new_count >= count, "reference count overflow"); } @@ -93,7 +93,7 @@ PartialArrayState* PartialArrayStateAllocator::allocate(oop src, oop dst, } void PartialArrayStateAllocator::release(PartialArrayState* state) { - size_t refcount = Atomic::sub(&state->_refcount, size_t(1), memory_order_release); + size_t refcount = AtomicAccess::sub(&state->_refcount, size_t(1), memory_order_release); if (refcount != 0) { assert(refcount + 1 != 0, "refcount underflow"); } else { @@ -117,25 +117,25 @@ PartialArrayStateManager::~PartialArrayStateManager() { } Arena* PartialArrayStateManager::register_allocator() { - uint idx = Atomic::fetch_then_add(&_registered_allocators, 1u, memory_order_relaxed); + uint idx = AtomicAccess::fetch_then_add(&_registered_allocators, 1u, memory_order_relaxed); assert(idx < _max_allocators, "exceeded configured max number of allocators"); return ::new (&_arenas[idx]) Arena(mtGC); } #ifdef ASSERT void PartialArrayStateManager::release_allocator() { - uint old = Atomic::fetch_then_add(&_released_allocators, 1u, memory_order_relaxed); - assert(old < Atomic::load(&_registered_allocators), "too many releases"); + uint old = AtomicAccess::fetch_then_add(&_released_allocators, 1u, memory_order_relaxed); + assert(old < AtomicAccess::load(&_registered_allocators), "too many releases"); } #endif // ASSERT void PartialArrayStateManager::reset() { - uint count = Atomic::load(&_registered_allocators); - assert(count == Atomic::load(&_released_allocators), + uint count = AtomicAccess::load(&_registered_allocators); + assert(count == AtomicAccess::load(&_released_allocators), "some allocators still active"); for (uint i = 0; i < count; ++i) { _arenas[i].~Arena(); } - Atomic::store(&_registered_allocators, 0u); - DEBUG_ONLY(Atomic::store(&_released_allocators, 0u);) + AtomicAccess::store(&_registered_allocators, 0u); + DEBUG_ONLY(AtomicAccess::store(&_released_allocators, 0u);) } diff --git a/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp b/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp index 9d127697e5875..3693abaf8cf99 100644 --- a/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp +++ b/src/hotspot/share/gc/shared/partialArrayTaskStepper.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "gc/shared/partialArrayTaskStepper.hpp" #include "gc/shared/partialArrayState.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/checkedCast.hpp" #include "utilities/debug.hpp" @@ -52,9 +52,9 @@ PartialArrayTaskStepper::next_impl(size_t length, volatile size_t* index_addr) c // Because we limit the number of enqueued tasks to being no more than the // number of remaining chunks to process, we can use an atomic add for the // claim, rather than a CAS loop. 
- size_t start = Atomic::fetch_then_add(index_addr, - _chunk_size, - memory_order_relaxed); + size_t start = AtomicAccess::fetch_then_add(index_addr, + _chunk_size, + memory_order_relaxed); assert(start < length, "invariant: start %zu, length %zu", start, length); assert(((length - start) % _chunk_size) == 0, diff --git a/src/hotspot/share/gc/shared/plab.inline.hpp b/src/hotspot/share/gc/shared/plab.inline.hpp index 27eab27453e3f..020738352d345 100644 --- a/src/hotspot/share/gc/shared/plab.inline.hpp +++ b/src/hotspot/share/gc/shared/plab.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,22 +29,22 @@ #include "gc/shared/collectedHeap.inline.hpp" #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" void PLABStats::add_allocated(size_t v) { - Atomic::add(&_allocated, v); + AtomicAccess::add(&_allocated, v); } void PLABStats::add_unused(size_t v) { - Atomic::add(&_unused, v); + AtomicAccess::add(&_unused, v); } void PLABStats::add_wasted(size_t v) { - Atomic::add(&_wasted, v); + AtomicAccess::add(&_wasted, v); } void PLABStats::add_undo_wasted(size_t v) { - Atomic::add(&_undo_wasted, v); + AtomicAccess::add(&_undo_wasted, v); } #endif // SHARE_GC_SHARED_PLAB_INLINE_HPP diff --git a/src/hotspot/share/gc/shared/preservedMarks.cpp b/src/hotspot/share/gc/shared/preservedMarks.cpp index 0576757524347..1c9f1c82e6fa5 100644 --- a/src/hotspot/share/gc/shared/preservedMarks.cpp +++ b/src/hotspot/share/gc/shared/preservedMarks.cpp @@ -29,7 +29,7 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/macros.hpp" void PreservedMarks::restore() { @@ -60,7 +60,7 @@ void PreservedMarks::restore_and_increment(volatile size_t* const total_size_add restore(); // Only do the atomic add if the size is > 0. if (stack_size > 0) { - Atomic::add(total_size_addr, stack_size); + AtomicAccess::add(total_size_addr, stack_size); } } diff --git a/src/hotspot/share/gc/shared/pretouchTask.cpp b/src/hotspot/share/gc/shared/pretouchTask.cpp index e06c561b6a8ad..cc84c8c449d27 100644 --- a/src/hotspot/share/gc/shared/pretouchTask.cpp +++ b/src/hotspot/share/gc/shared/pretouchTask.cpp @@ -25,7 +25,7 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shared/pretouchTask.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" @@ -52,11 +52,11 @@ size_t PretouchTask::chunk_size() { void PretouchTask::work(uint worker_id) { while (true) { - char* cur_start = Atomic::load(&_cur_addr); + char* cur_start = AtomicAccess::load(&_cur_addr); char* cur_end = cur_start + MIN2(_chunk_size, pointer_delta(_end_addr, cur_start, 1)); if (cur_start >= cur_end) { break; - } else if (cur_start == Atomic::cmpxchg(&_cur_addr, cur_start, cur_end)) { + } else if (cur_start == AtomicAccess::cmpxchg(&_cur_addr, cur_start, cur_end)) { os::pretouch_memory(cur_start, cur_end, _page_size); } // Else attempt to claim chunk failed, so try again. 
} diff --git a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp index fd2bd8f3edc67..df7d8f7b38d1a 100644 --- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp +++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp @@ -30,7 +30,7 @@ #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #define ASSERT_REF_TYPE(ref_type) assert((ref_type) >= REF_SOFT && (ref_type) <= REF_PHANTOM, \ "Invariant (%d)", (int)ref_type) @@ -214,7 +214,7 @@ ReferenceProcessorPhaseTimes::~ReferenceProcessorPhaseTimes() { void ReferenceProcessorPhaseTimes::add_ref_dropped(ReferenceType ref_type, size_t count) { ASSERT_REF_TYPE(ref_type); - Atomic::add(&_ref_dropped[ref_type_2_index(ref_type)], count, memory_order_relaxed); + AtomicAccess::add(&_ref_dropped[ref_type_2_index(ref_type)], count, memory_order_relaxed); } void ReferenceProcessorPhaseTimes::set_ref_discovered(ReferenceType ref_type, size_t count) { diff --git a/src/hotspot/share/gc/shared/satbMarkQueue.cpp b/src/hotspot/share/gc/shared/satbMarkQueue.cpp index 3cba3baf5f1c7..e6ffe39facfce 100644 --- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp +++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp @@ -27,7 +27,7 @@ #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" @@ -87,26 +87,26 @@ SATBMarkQueueSet::~SATBMarkQueueSet() { // Increment count. If count > threshold, set flag, else maintain flag. static void increment_count(volatile size_t* cfptr, size_t threshold) { size_t old; - size_t value = Atomic::load(cfptr); + size_t value = AtomicAccess::load(cfptr); do { old = value; value += 2; assert(value > old, "overflow"); if (value > threshold) value |= 1; - value = Atomic::cmpxchg(cfptr, old, value); + value = AtomicAccess::cmpxchg(cfptr, old, value); } while (value != old); } // Decrement count. If count == 0, clear flag, else maintain flag. 
static void decrement_count(volatile size_t* cfptr) { size_t old; - size_t value = Atomic::load(cfptr); + size_t value = AtomicAccess::load(cfptr); do { assert((value >> 1) != 0, "underflow"); old = value; value -= 2; if (value <= 1) value = 0; - value = Atomic::cmpxchg(cfptr, old, value); + value = AtomicAccess::cmpxchg(cfptr, old, value); } while (value != old); } @@ -332,7 +332,7 @@ void SATBMarkQueueSet::print_all(const char* msg) { #endif // PRODUCT void SATBMarkQueueSet::abandon_completed_buffers() { - Atomic::store(&_count_and_process_flag, size_t(0)); + AtomicAccess::store(&_count_and_process_flag, size_t(0)); BufferNode* buffers_to_delete = _list.pop_all(); while (buffers_to_delete != nullptr) { BufferNode* bn = buffers_to_delete; diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp index 9256a70adbc98..08476cb2a3ad6 100644 --- a/src/hotspot/share/gc/shared/space.cpp +++ b/src/hotspot/share/gc/shared/space.cpp @@ -30,7 +30,7 @@ #include "memory/iterator.inline.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/java.hpp" #include "runtime/safepoint.hpp" #include "utilities/align.hpp" @@ -126,7 +126,7 @@ inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) { HeapWord* obj = top(); if (pointer_delta(end(), obj) >= size) { HeapWord* new_top = obj + size; - HeapWord* result = Atomic::cmpxchg(top_addr(), obj, new_top); + HeapWord* result = AtomicAccess::cmpxchg(top_addr(), obj, new_top); // result can be one of two: // the old top value: the exchange succeeded // otherwise: the new value of the top is returned. diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp index f4f69c8269ff6..8bf6f4e539a5c 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp @@ -36,7 +36,7 @@ #include "memory/iterator.hpp" #include "nmt/memTag.hpp" #include "oops/access.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/cpuTimeCounters.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -75,7 +75,7 @@ void StringDedup::Processor::wait_for_requests() const { { ThreadBlockInVM tbivm(_thread); MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag); - OopStorage* storage = Atomic::load(&_storage_for_requests)->storage(); + OopStorage* storage = AtomicAccess::load(&_storage_for_requests)->storage(); while ((storage->allocation_count() == 0) && !Table::is_dead_entry_removal_needed()) { ml.wait(); @@ -83,7 +83,7 @@ void StringDedup::Processor::wait_for_requests() const { } // Swap the request and processing storage objects. log_trace(stringdedup)("swapping request storages"); - _storage_for_processing = Atomic::xchg(&_storage_for_requests, _storage_for_processing); + _storage_for_processing = AtomicAccess::xchg(&_storage_for_requests, _storage_for_processing); GlobalCounter::write_synchronize(); // Wait for the now current processing storage object to no longer be used // by an in-progress GC. 
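The increment_count/decrement_count helpers above pack a buffer count (bits 1 and up) and a "needs processing" flag (bit 0) into one word and update both in a single CAS loop. A sketch of that encoding with std::atomic; illustrative code, not HotSpot's.

#include <atomic>
#include <cstddef>

std::atomic<std::size_t> count_and_flag{0};

void increment_count(std::size_t threshold) {
  std::size_t old = count_and_flag.load(std::memory_order_relaxed);
  std::size_t desired;
  do {
    desired = old + 2;                         // bump the count (bits 1..N)
    if (desired > threshold) desired |= 1;     // set the process flag
    // compare_exchange_weak refreshes 'old' on failure, so 'desired' is
    // recomputed from whatever value another thread installed.
  } while (!count_and_flag.compare_exchange_weak(old, desired,
                                                 std::memory_order_relaxed));
}

void decrement_count() {
  std::size_t old = count_and_flag.load(std::memory_order_relaxed);
  std::size_t desired;
  do {
    desired = old - 2;
    if (desired <= 1) desired = 0;             // count reached zero: clear flag
  } while (!count_and_flag.compare_exchange_weak(old, desired,
                                                 std::memory_order_relaxed));
}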
Again here, the num-dead notification from the diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupStorageUse.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupStorageUse.cpp index 175c63421e191..427058f3e7eb0 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupStorageUse.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStorageUse.cpp @@ -23,7 +23,7 @@ */ #include "gc/shared/stringdedup/stringDedupStorageUse.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "utilities/debug.hpp" #include "utilities/globalCounter.inline.hpp" @@ -34,18 +34,18 @@ StringDedup::StorageUse::StorageUse(OopStorage* storage) : {} bool StringDedup::StorageUse::is_used_acquire() const { - return Atomic::load_acquire(&_use_count) > 0; + return AtomicAccess::load_acquire(&_use_count) > 0; } StringDedup::StorageUse* StringDedup::StorageUse::obtain(StorageUse* volatile* ptr) { GlobalCounter::CriticalSection cs(Thread::current()); - StorageUse* storage = Atomic::load(ptr); - Atomic::inc(&storage->_use_count); + StorageUse* storage = AtomicAccess::load(ptr); + AtomicAccess::inc(&storage->_use_count); return storage; } void StringDedup::StorageUse::relinquish() { - size_t result = Atomic::sub(&_use_count, size_t(1)); + size_t result = AtomicAccess::sub(&_use_count, size_t(1)); assert(result != SIZE_MAX, "use count underflow"); } diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp index 83c6fea8c5b8e..5f04be97b7413 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp @@ -243,20 +243,20 @@ void StringDedup::Table::num_dead_callback(size_t num_dead) { // Lock while modifying dead count and state. MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag); - switch (Atomic::load(&_dead_state)) { + switch (AtomicAccess::load(&_dead_state)) { case DeadState::good: - Atomic::store(&_dead_count, num_dead); + AtomicAccess::store(&_dead_count, num_dead); break; case DeadState::wait1: // Set count first, so dedup thread gets this or a later value if it // sees the good state. - Atomic::store(&_dead_count, num_dead); - Atomic::release_store(&_dead_state, DeadState::good); + AtomicAccess::store(&_dead_count, num_dead); + AtomicAccess::release_store(&_dead_state, DeadState::good); break; case DeadState::wait2: - Atomic::release_store(&_dead_state, DeadState::wait1); + AtomicAccess::release_store(&_dead_state, DeadState::wait1); break; case DeadState::cleaning: @@ -475,19 +475,19 @@ void StringDedup::Table::add(TableValue tv, uint hash_code) { } bool StringDedup::Table::is_dead_count_good_acquire() { - return Atomic::load_acquire(&_dead_state) == DeadState::good; + return AtomicAccess::load_acquire(&_dead_state) == DeadState::good; } // Should be consistent with cleanup_start_if_needed. bool StringDedup::Table::is_grow_needed() { return is_dead_count_good_acquire() && - ((_number_of_entries - Atomic::load(&_dead_count)) > _grow_threshold); + ((_number_of_entries - AtomicAccess::load(&_dead_count)) > _grow_threshold); } // Should be consistent with cleanup_start_if_needed. 
bool StringDedup::Table::is_dead_entry_removal_needed() { return is_dead_count_good_acquire() && - Config::should_cleanup_table(_number_of_entries, Atomic::load(&_dead_count)); + Config::should_cleanup_table(_number_of_entries, AtomicAccess::load(&_dead_count)); } StringDedup::Table::TableValue @@ -646,7 +646,7 @@ bool StringDedup::Table::cleanup_start_if_needed(bool grow_only, bool force) { // If dead count is good then we can read it once and use it below // without needing any locking. The recorded count could increase // after the read, but that's okay. - size_t dead_count = Atomic::load(&_dead_count); + size_t dead_count = AtomicAccess::load(&_dead_count); // This assertion depends on dead state tracking. Otherwise, concurrent // reference processing could detect some, but a cleanup operation could // remove them before they are reported. @@ -670,8 +670,8 @@ bool StringDedup::Table::cleanup_start_if_needed(bool grow_only, bool force) { void StringDedup::Table::set_dead_state_cleaning() { MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag); - Atomic::store(&_dead_count, size_t(0)); - Atomic::store(&_dead_state, DeadState::cleaning); + AtomicAccess::store(&_dead_count, size_t(0)); + AtomicAccess::store(&_dead_state, DeadState::cleaning); } bool StringDedup::Table::start_resizer(bool grow_only, size_t number_of_entries) { @@ -705,7 +705,7 @@ void StringDedup::Table::cleanup_end() { delete _cleanup_state; _cleanup_state = nullptr; MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag); - Atomic::store(&_dead_state, DeadState::wait2); + AtomicAccess::store(&_dead_state, DeadState::wait2); } void StringDedup::Table::verify() { diff --git a/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp b/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp index 35a446de532e9..83783b31ad996 100644 --- a/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp +++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp @@ -96,7 +96,7 @@ void SuspendibleThreadSet::synchronize() { { MonitorLocker ml(STS_lock, Mutex::_no_safepoint_check_flag); assert(!should_yield(), "Only one at a time"); - Atomic::store(&_suspend_all, true); + AtomicAccess::store(&_suspend_all, true); if (is_synchronized()) { return; } @@ -127,6 +127,6 @@ void SuspendibleThreadSet::desynchronize() { MonitorLocker ml(STS_lock, Mutex::_no_safepoint_check_flag); assert(should_yield(), "STS not synchronizing"); assert(is_synchronized(), "STS not synchronized"); - Atomic::store(&_suspend_all, false); + AtomicAccess::store(&_suspend_all, false); ml.notify_all(); } diff --git a/src/hotspot/share/gc/shared/suspendibleThreadSet.hpp b/src/hotspot/share/gc/shared/suspendibleThreadSet.hpp index f88fb6570edee..38568c015bc2f 100644 --- a/src/hotspot/share/gc/shared/suspendibleThreadSet.hpp +++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" // A SuspendibleThreadSet is a set of threads that can be suspended. // A thread can join and later leave the set, and periodically yield. 
@@ -59,7 +59,7 @@ class SuspendibleThreadSet : public AllStatic { public: // Returns true if an suspension is in progress. - static bool should_yield() { return Atomic::load(&_suspend_all); } + static bool should_yield() { return AtomicAccess::load(&_suspend_all); } // Suspends the current thread if a suspension is in progress. static void yield() { diff --git a/src/hotspot/share/gc/shared/taskqueue.cpp b/src/hotspot/share/gc/shared/taskqueue.cpp index 162eadc3cf0d2..f75dc4c292326 100644 --- a/src/hotspot/share/gc/shared/taskqueue.cpp +++ b/src/hotspot/share/gc/shared/taskqueue.cpp @@ -25,7 +25,7 @@ #include "gc/shared/taskqueue.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp b/src/hotspot/share/gc/shared/taskqueue.hpp index 42d32f3dc96b9..1c36e18894aa3 100644 --- a/src/hotspot/share/gc/shared/taskqueue.hpp +++ b/src/hotspot/share/gc/shared/taskqueue.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "memory/allocation.hpp" #include "memory/padded.hpp" #include "oops/oopsHierarchy.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/ostream.hpp" @@ -140,36 +140,36 @@ class TaskQueueSuper: public CHeapObj { }; uint bottom_relaxed() const { - return Atomic::load(&_bottom); + return AtomicAccess::load(&_bottom); } uint bottom_acquire() const { - return Atomic::load_acquire(&_bottom); + return AtomicAccess::load_acquire(&_bottom); } void set_bottom_relaxed(uint new_bottom) { - Atomic::store(&_bottom, new_bottom); + AtomicAccess::store(&_bottom, new_bottom); } void release_set_bottom(uint new_bottom) { - Atomic::release_store(&_bottom, new_bottom); + AtomicAccess::release_store(&_bottom, new_bottom); } Age age_relaxed() const { - return Age(Atomic::load(&_age._data)); + return Age(AtomicAccess::load(&_age._data)); } void set_age_relaxed(Age new_age) { - Atomic::store(&_age._data, new_age._data); + AtomicAccess::store(&_age._data, new_age._data); } Age cmpxchg_age(Age old_age, Age new_age) { - return Age(Atomic::cmpxchg(&_age._data, old_age._data, new_age._data)); + return Age(AtomicAccess::cmpxchg(&_age._data, old_age._data, new_age._data)); } idx_t age_top_relaxed() const { // Atomically accessing a subfield of an "atomic" member. - return Atomic::load(&_age._fields._top); + return AtomicAccess::load(&_age._fields._top); } // These both operate mod N. diff --git a/src/hotspot/share/gc/shared/taskqueue.inline.hpp b/src/hotspot/share/gc/shared/taskqueue.inline.hpp index d28d8c8b6f004..f115d94740bd6 100644 --- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp +++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "utilities/debug.hpp" #include "utilities/ostream.hpp" diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp index 168b4398240b4..dc5e415ea38f6 100644 --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -30,7 +30,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/perfData.hpp" #include "runtime/threadSMR.hpp" @@ -460,9 +460,9 @@ size_t ThreadLocalAllocBuffer::end_reserve() { } const HeapWord* ThreadLocalAllocBuffer::start_relaxed() const { - return Atomic::load(&_start); + return AtomicAccess::load(&_start); } const HeapWord* ThreadLocalAllocBuffer::top_relaxed() const { - return Atomic::load(&_top); + return AtomicAccess::load(&_top); } diff --git a/src/hotspot/share/gc/shared/workerThread.cpp b/src/hotspot/share/gc/shared/workerThread.cpp index ffeb3a4194b7b..3a999da59dc17 100644 --- a/src/hotspot/share/gc/shared/workerThread.cpp +++ b/src/hotspot/share/gc/shared/workerThread.cpp @@ -26,7 +26,7 @@ #include "gc/shared/workerThread.hpp" #include "logging/log.hpp" #include "memory/iterator.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/os.hpp" @@ -61,7 +61,7 @@ void WorkerTaskDispatcher::worker_run_task() { _start_semaphore.wait(); // Get and set worker id. - const uint worker_id = Atomic::fetch_then_add(&_started, 1u); + const uint worker_id = AtomicAccess::fetch_then_add(&_started, 1u); WorkerThread::set_worker_id(worker_id); // Run task. @@ -70,7 +70,7 @@ void WorkerTaskDispatcher::worker_run_task() { // Mark that the worker is done with the task. // The worker is not allowed to read the state variables after this line. - const uint not_finished = Atomic::sub(&_not_finished, 1u); + const uint not_finished = AtomicAccess::sub(&_not_finished, 1u); // The last worker signals to the coordinator that all work is completed. 
if (not_finished == 0) { diff --git a/src/hotspot/share/gc/shared/workerUtils.cpp b/src/hotspot/share/gc/shared/workerUtils.cpp index 40b78d3f6223b..422d513a5cd22 100644 --- a/src/hotspot/share/gc/shared/workerUtils.cpp +++ b/src/hotspot/share/gc/shared/workerUtils.cpp @@ -23,7 +23,7 @@ */ #include "gc/shared/workerUtils.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" // *** WorkerThreadsBarrierSync @@ -88,7 +88,7 @@ SubTasksDone::SubTasksDone(uint n) : #ifdef ASSERT void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) { - if (Atomic::cmpxchg(&_verification_done, false, true)) { + if (AtomicAccess::cmpxchg(&_verification_done, false, true)) { // another thread has done the verification return; } @@ -116,7 +116,7 @@ void SubTasksDone::all_tasks_claimed_impl(uint skipped[], size_t skipped_size) { bool SubTasksDone::try_claim_task(uint t) { assert(t < _n_tasks, "bad task id."); - return !_tasks[t] && !Atomic::cmpxchg(&_tasks[t], false, true); + return !_tasks[t] && !AtomicAccess::cmpxchg(&_tasks[t], false, true); } SubTasksDone::~SubTasksDone() { @@ -129,7 +129,7 @@ SubTasksDone::~SubTasksDone() { bool SequentialSubTasksDone::try_claim_task(uint& t) { t = _num_claimed; if (t < _num_tasks) { - t = Atomic::add(&_num_claimed, 1u) - 1; + t = AtomicAccess::add(&_num_claimed, 1u) - 1; } return t < _num_tasks; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp index 725e4e6e3e9f9..427eaad26ce21 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp @@ -39,7 +39,7 @@ #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" #include "memory/iterator.inline.hpp" #include "oops/compressedOops.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" // diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp index 0d9077be2265b..ec39e0c0ccb0a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp @@ -33,7 +33,7 @@ #include "gc/shenandoah/shenandoahUtils.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/powerOfTwo.hpp" diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp index 35faa40af7715..a58b93111833d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp @@ -33,7 +33,7 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "nmt/memTracker.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/copy.hpp" ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) : @@ -150,11 +150,11 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { // before hitting the (potentially contended) atomic index. 
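The WorkerTaskDispatcher code above hands out worker ids with an atomic increment and lets the worker that drops the not-finished count to zero signal the coordinator. A sketch of those two counters, with the semaphores elided and std::atomic standing in for AtomicAccess; names are illustrative.

#include <atomic>
#include <cstdint>

struct DispatchCounters {
  std::atomic<std::uint32_t> _started{0};
  std::atomic<std::uint32_t> _not_finished{0};   // set to the worker count per task

  std::uint32_t claim_worker_id() {
    return _started.fetch_add(1, std::memory_order_relaxed);
  }

  // Returns true for exactly one caller: the last worker to finish.
  bool finish_one() {
    // fetch_sub returns the value before the decrement.
    return _not_finished.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }
};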
size_t max = _heap->num_regions(); - size_t old = Atomic::load(&_current_index); + size_t old = AtomicAccess::load(&_current_index); for (size_t index = old; index < max; index++) { if (is_in(index)) { - size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed); + size_t cur = AtomicAccess::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed); assert(cur >= old, "Always move forward"); if (cur == old) { // Successfully moved the claim index, this is our region. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp index c5aa4e6e44bf9..220f3df8d4f82 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahController.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahController.cpp @@ -31,11 +31,11 @@ void ShenandoahController::update_gc_id() { - Atomic::inc(&_gc_id); + AtomicAccess::inc(&_gc_id); } size_t ShenandoahController::get_gc_id() { - return Atomic::load(&_gc_id); + return AtomicAccess::load(&_gc_id); } void ShenandoahController::handle_alloc_failure(const ShenandoahAllocRequest& req, bool block) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp index 9c84ac41e8415..dd09bec8a7c1a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp @@ -37,22 +37,22 @@ ShenandoahEvacOOMCounter::ShenandoahEvacOOMCounter() : void ShenandoahEvacOOMCounter::decrement() { assert(unmasked_count() > 0, "sanity"); // NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive. - Atomic::dec(&_bits); + AtomicAccess::dec(&_bits); } void ShenandoahEvacOOMCounter::clear() { assert(unmasked_count() == 0, "sanity"); - Atomic::release_store_fence(&_bits, (jint)0); + AtomicAccess::release_store_fence(&_bits, (jint)0); } void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) { - jint threads_in_evac = Atomic::load_acquire(&_bits); + jint threads_in_evac = AtomicAccess::load_acquire(&_bits); while (true) { jint newval = decrement ? (threads_in_evac - 1) | OOM_MARKER_MASK : threads_in_evac | OOM_MARKER_MASK; - jint other = Atomic::cmpxchg(&_bits, threads_in_evac, newval); + jint other = AtomicAccess::cmpxchg(&_bits, threads_in_evac, newval); if (other == threads_in_evac) { // Success: wait for other threads to get out of the protocol and return. break; @@ -65,7 +65,7 @@ void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) { bool ShenandoahEvacOOMCounter::try_increment() { - jint threads_in_evac = Atomic::load_acquire(&_bits); + jint threads_in_evac = AtomicAccess::load_acquire(&_bits); while (true) { // Cannot enter evacuation if OOM_MARKER_MASK is set. 
@@ -73,7 +73,7 @@ bool ShenandoahEvacOOMCounter::try_increment() return false; } - jint other = Atomic::cmpxchg(&_bits, threads_in_evac, threads_in_evac + 1); + jint other = AtomicAccess::cmpxchg(&_bits, threads_in_evac, threads_in_evac + 1); if (other == threads_in_evac) { // Success: caller may safely enter evacuation return true; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp index 5c49c2edbb2dc..11509ec9d2dbd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp @@ -29,14 +29,14 @@ #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" jint ShenandoahEvacOOMCounter::load_acquire() { - return Atomic::load_acquire(&_bits); + return AtomicAccess::load_acquire(&_bits); } jint ShenandoahEvacOOMCounter::unmasked_count() { - return Atomic::load_acquire(&_bits) & ~OOM_MARKER_MASK; + return AtomicAccess::load_acquire(&_bits) & ~OOM_MARKER_MASK; } void ShenandoahEvacOOMHandler::enter_evacuation(Thread* thr) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 7b3839dc19803..eef0764ce7463 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -148,15 +148,15 @@ ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode } size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const { - return Atomic::load(&_bytes_allocated_since_gc_start); + return AtomicAccess::load(&_bytes_allocated_since_gc_start); } void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() { - Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0); + AtomicAccess::store(&_bytes_allocated_since_gc_start, (size_t)0); } void ShenandoahGeneration::increase_allocated(size_t bytes) { - Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); + AtomicAccess::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); } void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) { @@ -854,7 +854,7 @@ size_t ShenandoahGeneration::increment_affiliated_region_count() { // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with // a coherent value. - return Atomic::add(&_affiliated_region_count, (size_t) 1); + return AtomicAccess::add(&_affiliated_region_count, (size_t) 1); } size_t ShenandoahGeneration::decrement_affiliated_region_count() { @@ -862,7 +862,7 @@ size_t ShenandoahGeneration::decrement_affiliated_region_count() { // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with // a coherent value. 
- auto affiliated_region_count = Atomic::sub(&_affiliated_region_count, (size_t) 1); + auto affiliated_region_count = AtomicAccess::sub(&_affiliated_region_count, (size_t) 1); assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (used() + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), "used + humongous cannot exceed regions"); @@ -870,19 +870,19 @@ size_t ShenandoahGeneration::decrement_affiliated_region_count() { } size_t ShenandoahGeneration::decrement_affiliated_region_count_without_lock() { - return Atomic::sub(&_affiliated_region_count, (size_t) 1); + return AtomicAccess::sub(&_affiliated_region_count, (size_t) 1); } size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) { shenandoah_assert_heaplocked_or_safepoint(); - return Atomic::add(&_affiliated_region_count, delta); + return AtomicAccess::add(&_affiliated_region_count, delta); } size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { shenandoah_assert_heaplocked_or_safepoint(); - assert(Atomic::load(&_affiliated_region_count) >= delta, "Affiliated region count cannot be negative"); + assert(AtomicAccess::load(&_affiliated_region_count) >= delta, "Affiliated region count cannot be negative"); - auto const affiliated_region_count = Atomic::sub(&_affiliated_region_count, delta); + auto const affiliated_region_count = AtomicAccess::sub(&_affiliated_region_count, delta); assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_used + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), "used + humongous cannot exceed regions"); @@ -891,18 +891,18 @@ size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); - Atomic::store(&_affiliated_region_count, num_regions); - Atomic::store(&_used, num_bytes); + AtomicAccess::store(&_affiliated_region_count, num_regions); + AtomicAccess::store(&_used, num_bytes); _humongous_waste = humongous_waste; } void ShenandoahGeneration::increase_used(size_t bytes) { - Atomic::add(&_used, bytes); + AtomicAccess::add(&_used, bytes); } void ShenandoahGeneration::increase_humongous_waste(size_t bytes) { if (bytes > 0) { - Atomic::add(&_humongous_waste, bytes); + AtomicAccess::add(&_humongous_waste, bytes); } } @@ -910,18 +910,18 @@ void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { if (bytes > 0) { assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes), "Waste (%zu) cannot be negative (after subtracting %zu)", _humongous_waste, bytes); - Atomic::sub(&_humongous_waste, bytes); + AtomicAccess::sub(&_humongous_waste, bytes); } } void ShenandoahGeneration::decrease_used(size_t bytes) { assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_used >= bytes), "cannot reduce bytes used by generation below zero"); - Atomic::sub(&_used, bytes); + AtomicAccess::sub(&_used, bytes); } size_t ShenandoahGeneration::used_regions() const { - return Atomic::load(&_affiliated_region_count); + return AtomicAccess::load(&_affiliated_region_count); } size_t ShenandoahGeneration::free_unaffiliated_regions() const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 2b7aca342dad1..9d70d4d76f36c 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -128,7 +128,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { virtual size_t used_regions() const; virtual size_t used_regions_size() const; virtual size_t free_unaffiliated_regions() const; - size_t used() const override { return Atomic::load(&_used); } + size_t used() const override { return AtomicAccess::load(&_used); } size_t available() const override; size_t available_with_reserve() const; size_t used_including_humongous_waste() const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp index 1cf8b78ef1aae..ce8d96308baa9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp @@ -42,7 +42,7 @@ #include "logging/log.hpp" #include "memory/metaspaceStats.hpp" #include "memory/metaspaceUtils.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/events.hpp" ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 927c9e15dc527..c2dca09a344de 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -86,7 +86,7 @@ #include "nmt/memTracker.hpp" #include "oops/compressedOops.inline.hpp" #include "prims/jvmtiTagMap.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/java.hpp" @@ -698,7 +698,7 @@ size_t ShenandoahHeap::used() const { } size_t ShenandoahHeap::committed() const { - return Atomic::load(&_committed); + return AtomicAccess::load(&_committed); } void ShenandoahHeap::increase_committed(size_t bytes) { @@ -789,7 +789,7 @@ size_t ShenandoahHeap::max_capacity() const { } size_t ShenandoahHeap::soft_max_capacity() const { - size_t v = Atomic::load(&_soft_max_size); + size_t v = AtomicAccess::load(&_soft_max_size); assert(min_capacity() <= v && v <= max_capacity(), "Should be in bounds: %zu <= %zu <= %zu", min_capacity(), v, max_capacity()); @@ -800,7 +800,7 @@ void ShenandoahHeap::set_soft_max_capacity(size_t v) { assert(min_capacity() <= v && v <= max_capacity(), "Should be in bounds: %zu <= %zu <= %zu", min_capacity(), v, max_capacity()); - Atomic::store(&_soft_max_size, v); + AtomicAccess::store(&_soft_max_size, v); } size_t ShenandoahHeap::min_capacity() const { @@ -852,7 +852,7 @@ void ShenandoahHeap::notify_explicit_gc_requested() { } bool ShenandoahHeap::check_soft_max_changed() { - size_t new_soft_max = Atomic::load(&SoftMaxHeapSize); + size_t new_soft_max = AtomicAccess::load(&SoftMaxHeapSize); size_t old_soft_max = soft_max_capacity(); if (new_soft_max != old_soft_max) { new_soft_max = MAX2(min_capacity(), new_soft_max); @@ -1978,8 +1978,8 @@ class ShenandoahParallelHeapRegionTask : public WorkerTask { size_t stride = _stride; size_t max = _heap->num_regions(); - while (Atomic::load(&_index) < max) { - size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed); + while (AtomicAccess::load(&_index) < max) { + size_t cur = AtomicAccess::fetch_then_add(&_index, stride, memory_order_relaxed); size_t start = cur; size_t end = MIN2(cur + stride, 
max); if (start >= max) break; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp index 4de6ccead5171..ca368f5ed295f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp @@ -48,7 +48,7 @@ #include "gc/shenandoah/shenandoahWorkGroup.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/objectMonitor.inline.hpp" #include "runtime/prefetch.inline.hpp" @@ -60,7 +60,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() { } inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() { - size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed); + size_t new_index = AtomicAccess::add(&_index, (size_t) 1, memory_order_relaxed); // get_region() provides the bounds-check and returns null on OOB. return _heap->get_region(new_index - 1); } @@ -74,15 +74,15 @@ inline WorkerThreads* ShenandoahHeap::safepoint_workers() { } inline void ShenandoahHeap::notify_gc_progress() { - Atomic::store(&_gc_no_progress_count, (size_t) 0); + AtomicAccess::store(&_gc_no_progress_count, (size_t) 0); } inline void ShenandoahHeap::notify_gc_no_progress() { - Atomic::inc(&_gc_no_progress_count); + AtomicAccess::inc(&_gc_no_progress_count); } inline size_t ShenandoahHeap::get_gc_no_progress_count() const { - return Atomic::load(&_gc_no_progress_count); + return AtomicAccess::load(&_gc_no_progress_count); } inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) const { @@ -197,38 +197,38 @@ inline void ShenandoahHeap::conc_update_with_forwarded(T* p) { inline void ShenandoahHeap::atomic_update_oop(oop update, oop* addr, oop compare) { assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr)); - Atomic::cmpxchg(addr, compare, update, memory_order_release); + AtomicAccess::cmpxchg(addr, compare, update, memory_order_release); } inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, narrowOop compare) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); narrowOop u = CompressedOops::encode(update); - Atomic::cmpxchg(addr, compare, u, memory_order_release); + AtomicAccess::cmpxchg(addr, compare, u, memory_order_release); } inline void ShenandoahHeap::atomic_update_oop(oop update, narrowOop* addr, oop compare) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); narrowOop c = CompressedOops::encode(compare); narrowOop u = CompressedOops::encode(update); - Atomic::cmpxchg(addr, c, u, memory_order_release); + AtomicAccess::cmpxchg(addr, c, u, memory_order_release); } inline bool ShenandoahHeap::atomic_update_oop_check(oop update, oop* addr, oop compare) { assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr)); - return (oop) Atomic::cmpxchg(addr, compare, update, memory_order_release) == compare; + return (oop) AtomicAccess::cmpxchg(addr, compare, update, memory_order_release) == compare; } inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); narrowOop u = CompressedOops::encode(update); - return (narrowOop) Atomic::cmpxchg(addr, compare, u, memory_order_release) == 
compare; + return (narrowOop) AtomicAccess::cmpxchg(addr, compare, u, memory_order_release) == compare; } inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, oop compare) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); narrowOop c = CompressedOops::encode(compare); narrowOop u = CompressedOops::encode(update); - return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare; + return CompressedOops::decode(AtomicAccess::cmpxchg(addr, c, u, memory_order_release)) == compare; } // The memory ordering discussion above does not apply for methods that store nulls: @@ -237,18 +237,18 @@ inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr, inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) { assert(is_aligned(addr, HeapWordSize), "Address should be aligned: " PTR_FORMAT, p2i(addr)); - Atomic::cmpxchg(addr, compare, oop(), memory_order_relaxed); + AtomicAccess::cmpxchg(addr, compare, oop(), memory_order_relaxed); } inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, oop compare) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); narrowOop cmp = CompressedOops::encode(compare); - Atomic::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed); + AtomicAccess::cmpxchg(addr, cmp, narrowOop(), memory_order_relaxed); } inline void ShenandoahHeap::atomic_clear_oop(narrowOop* addr, narrowOop compare) { assert(is_aligned(addr, sizeof(narrowOop)), "Address should be aligned: " PTR_FORMAT, p2i(addr)); - Atomic::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed); + AtomicAccess::cmpxchg(addr, compare, narrowOop(), memory_order_relaxed); } inline bool ShenandoahHeap::cancelled_gc() const { @@ -423,11 +423,11 @@ inline void ShenandoahHeap::set_affiliation(ShenandoahHeapRegion* r, ShenandoahA #ifdef ASSERT assert_lock_for_affiliation(region_affiliation(r), new_affiliation); #endif - Atomic::store(_affiliations + r->index(), (uint8_t) new_affiliation); + AtomicAccess::store(_affiliations + r->index(), (uint8_t) new_affiliation); } inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(size_t index) const { - return (ShenandoahAffiliation) Atomic::load(_affiliations + index); + return (ShenandoahAffiliation) AtomicAccess::load(_affiliations + index); } inline bool ShenandoahHeap::requires_marking(const void* entry) const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index 3eb7ccba35af3..df45a59433ec7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -43,7 +43,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" @@ -834,20 +834,20 @@ void ShenandoahHeapRegion::set_state(RegionState to) { evt.set_to(to); evt.commit(); } - Atomic::store(&_state, to); + AtomicAccess::store(&_state, to); } void ShenandoahHeapRegion::record_pin() { - Atomic::add(&_critical_pins, (size_t)1); + AtomicAccess::add(&_critical_pins, (size_t)1); } void ShenandoahHeapRegion::record_unpin() { assert(pin_count() > 0, "Region %zu should have non-zero pins", index()); - Atomic::sub(&_critical_pins, (size_t)1); + 
AtomicAccess::sub(&_critical_pins, (size_t)1); } size_t ShenandoahHeapRegion::pin_count() const { - return Atomic::load(&_critical_pins); + return AtomicAccess::load(&_critical_pins); } void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp index 4c99364bc6ed4..b6f65834c0763 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -216,7 +216,7 @@ class ShenandoahHeapRegion { bool is_alloc_allowed() const { auto cur_state = state(); return is_empty_state(cur_state) || cur_state == _regular || cur_state == _pinned; } bool is_stw_move_allowed() const { auto cur_state = state(); return cur_state == _regular || cur_state == _cset || (ShenandoahHumongousMoves && cur_state == _humongous_start); } - RegionState state() const { return Atomic::load(&_state); } + RegionState state() const { return AtomicAccess::load(&_state); } int state_ordinal() const { return region_state_to_ordinal(state()); } void record_pin(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp index 53f86c8cc58b6..da1caf24266d0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp @@ -32,7 +32,7 @@ #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahOldGeneration.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocRequest &req, size_t alignment_in_bytes) { shenandoah_assert_heaplocked_or_safepoint(); @@ -138,15 +138,15 @@ inline void ShenandoahHeapRegion::increase_live_data_gc_words(size_t s) { } inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { - size_t new_live_data = Atomic::add(&_live_data, s, memory_order_relaxed); + size_t new_live_data = AtomicAccess::add(&_live_data, s, memory_order_relaxed); } inline void ShenandoahHeapRegion::clear_live_data() { - Atomic::store(&_live_data, (size_t)0); + AtomicAccess::store(&_live_data, (size_t)0); } inline size_t ShenandoahHeapRegion::get_live_data_words() const { - return Atomic::load(&_live_data); + return AtomicAccess::load(&_live_data); } inline size_t ShenandoahHeapRegion::get_live_data_bytes() const { @@ -178,14 +178,14 @@ inline size_t ShenandoahHeapRegion::garbage_before_padded_for_promote() const { } inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const { - HeapWord* watermark = Atomic::load_acquire(&_update_watermark); + HeapWord* watermark = AtomicAccess::load_acquire(&_update_watermark); assert(bottom() <= watermark && watermark <= top(), "within bounds"); return watermark; } inline void ShenandoahHeapRegion::set_update_watermark(HeapWord* w) { assert(bottom() <= w && w <= top(), "within bounds"); - Atomic::release_store(&_update_watermark, w); + AtomicAccess::release_store(&_update_watermark, w); } // Fast version that avoids synchronization, only to be used at safepoints. 
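The get_update_watermark()/set_update_watermark() change just above keeps the same acquire/release pairing, only under the new class name. Below is a minimal sketch of that pairing with the renamed API, assuming AtomicAccess keeps the member signatures the old Atomic class had (which is all the mechanical substitutions in this patch rely on); the type and field names are illustrative, not HotSpot's:

    #include "runtime/atomicAccess.hpp"
    #include "utilities/globalDefinitions.hpp"

    struct WatermarkHolder {
      HeapWord* volatile _watermark;

      // Writer: release_store makes all earlier stores visible to any reader
      // that subsequently observes the new watermark via load_acquire.
      void set_watermark(HeapWord* w) {
        AtomicAccess::release_store(&_watermark, w);
      }

      // Reader: load_acquire pairs with the release_store above.
      HeapWord* watermark() const {
        return AtomicAccess::load_acquire(&_watermark);
      }
    };
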
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp index 918e6bf1be626..aed3faef906b3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp @@ -32,7 +32,7 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "logging/logStream.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/perfData.inline.hpp" #include "utilities/defaultStream.hpp" @@ -107,7 +107,7 @@ void ShenandoahHeapRegionCounters::update() { if (ShenandoahRegionSampling) { jlong current = nanos_to_millis(os::javaTimeNanos()); jlong last = _last_sample_millis; - if (current - last > ShenandoahRegionSamplingRate && Atomic::cmpxchg(&_last_sample_millis, last, current) == last) { + if (current - last > ShenandoahRegionSamplingRate && AtomicAccess::cmpxchg(&_last_sample_millis, last, current) == last) { ShenandoahHeap* heap = ShenandoahHeap::heap(); _status->set_value(encode_heap_status(heap)); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp index 368738fe5ead7..560de816db907 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.cpp @@ -27,7 +27,7 @@ #include "gc/shenandoah/shenandoahHeapRegion.hpp" #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/copy.hpp" ShenandoahHeapRegionSetIterator::ShenandoahHeapRegionSetIterator(const ShenandoahHeapRegionSet* const set) : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp b/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp index fcfe0d1d5d649..7a3b33f5fd06a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahLock.cpp @@ -24,7 +24,7 @@ #include "gc/shenandoah/shenandoahLock.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.hpp" #include "runtime/os.hpp" @@ -46,8 +46,8 @@ void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) { int ctr = os::is_MP() ? 0xFF : 0; int yields = 0; // Apply TTAS to avoid more expensive CAS calls if the lock is still held by other thread. - while (Atomic::load(&_state) == locked || - Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) { + while (AtomicAccess::load(&_state) == locked || + AtomicAccess::cmpxchg(&_state, unlocked, locked) != unlocked) { if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) { // Lightly contended, spin a little if no safepoint is pending. 
SpinPause(); @@ -113,11 +113,11 @@ ShenandoahReentrantLock::~ShenandoahReentrantLock() { void ShenandoahReentrantLock::lock() { Thread* const thread = Thread::current(); - Thread* const owner = Atomic::load(&_owner); + Thread* const owner = AtomicAccess::load(&_owner); if (owner != thread) { ShenandoahSimpleLock::lock(); - Atomic::store(&_owner, thread); + AtomicAccess::store(&_owner, thread); } _count++; @@ -130,13 +130,13 @@ void ShenandoahReentrantLock::unlock() { _count--; if (_count == 0) { - Atomic::store(&_owner, (Thread*)nullptr); + AtomicAccess::store(&_owner, (Thread*)nullptr); ShenandoahSimpleLock::unlock(); } } bool ShenandoahReentrantLock::owned_by_self() const { Thread* const thread = Thread::current(); - Thread* const owner = Atomic::load(&_owner); + Thread* const owner = AtomicAccess::load(&_owner); return owner == thread; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp b/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp index 0cef2414c0d99..fbdf49713544b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahLock.hpp @@ -48,26 +48,26 @@ class ShenandoahLock { ShenandoahLock() : _state(unlocked), _owner(nullptr) {}; void lock(bool allow_block_for_safepoint) { - assert(Atomic::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock"); + assert(AtomicAccess::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock"); if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) || - (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked)) { + (AtomicAccess::cmpxchg(&_state, unlocked, locked) != unlocked)) { // 1. Java thread, and there is a pending safepoint. Dive into contended locking // immediately without trying anything else, and block. // 2. Fast lock fails, dive into contended lock handling. contended_lock(allow_block_for_safepoint); } - assert(Atomic::load(&_state) == locked, "must be locked"); - assert(Atomic::load(&_owner) == nullptr, "must not be owned"); - DEBUG_ONLY(Atomic::store(&_owner, Thread::current());) + assert(AtomicAccess::load(&_state) == locked, "must be locked"); + assert(AtomicAccess::load(&_owner) == nullptr, "must not be owned"); + DEBUG_ONLY(AtomicAccess::store(&_owner, Thread::current());) } void unlock() { - assert(Atomic::load(&_owner) == Thread::current(), "sanity"); - DEBUG_ONLY(Atomic::store(&_owner, (Thread*)nullptr);) + assert(AtomicAccess::load(&_owner) == Thread::current(), "sanity"); + DEBUG_ONLY(AtomicAccess::store(&_owner, (Thread*)nullptr);) OrderAccess::fence(); - Atomic::store(&_state, unlocked); + AtomicAccess::store(&_state, unlocked); } void contended_lock(bool allow_block_for_safepoint); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp index 56daf4c595671..472f571264864 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. and/or its affiliates. * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -28,7 +28,7 @@ #define SHARE_GC_SHENANDOAH_SHENANDOAHMARKBITMAP_HPP #include "memory/memRegion.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" class ShenandoahMarkBitMap { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp index f0a9752b614c4..637948e261510 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. and/or its affiliates. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -28,7 +28,7 @@ #include "gc/shenandoah/shenandoahMarkBitMap.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/count_trailing_zeros.hpp" inline size_t ShenandoahMarkBitMap::address_to_index(const HeapWord* addr) const { @@ -47,7 +47,7 @@ inline bool ShenandoahMarkBitMap::mark_strong(HeapWord* heap_addr, bool& was_upg volatile bm_word_t* const addr = word_addr(bit); const bm_word_t mask = bit_mask(bit); const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1); - bm_word_t old_val = Atomic::load(addr); + bm_word_t old_val = AtomicAccess::load(addr); do { const bm_word_t new_val = old_val | mask; @@ -55,7 +55,7 @@ inline bool ShenandoahMarkBitMap::mark_strong(HeapWord* heap_addr, bool& was_upg assert(!was_upgraded, "Should be false already"); return false; // Someone else beat us to it. } - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_relaxed); + const bm_word_t cur_val = AtomicAccess::cmpxchg(addr, old_val, new_val, memory_order_relaxed); if (cur_val == old_val) { was_upgraded = (cur_val & mask_weak) != 0; return true; // Success. @@ -72,7 +72,7 @@ inline bool ShenandoahMarkBitMap::mark_weak(HeapWord* heap_addr) { volatile bm_word_t* const addr = word_addr(bit); const bm_word_t mask_weak = (bm_word_t)1 << (bit_in_word(bit) + 1); const bm_word_t mask_strong = (bm_word_t)1 << bit_in_word(bit); - bm_word_t old_val = Atomic::load(addr); + bm_word_t old_val = AtomicAccess::load(addr); do { if ((old_val & mask_strong) != 0) { @@ -82,7 +82,7 @@ inline bool ShenandoahMarkBitMap::mark_weak(HeapWord* heap_addr) { if (new_val == old_val) { return false; // Someone else beat us to it. } - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order_relaxed); + const bm_word_t cur_val = AtomicAccess::cmpxchg(addr, old_val, new_val, memory_order_relaxed); if (cur_val == old_val) { return true; // Success. 
} diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp index a08e7ef4b5f9e..55cec63f045a5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp @@ -434,7 +434,7 @@ void ShenandoahNMethodTableSnapshot::parallel_nmethods_do(NMethodClosure *f) { size_t max = (size_t)_limit; while (_claimed < max) { - size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed); + size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed); size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; @@ -458,7 +458,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) ShenandoahNMethod** list = _list->list(); size_t max = (size_t)_limit; while (_claimed < max) { - size_t cur = Atomic::fetch_then_add(&_claimed, stride, memory_order_relaxed); + size_t cur = AtomicAccess::fetch_then_add(&_claimed, stride, memory_order_relaxed); size_t start = cur; size_t end = MIN2(cur + stride, max); if (start >= max) break; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp index be032a60db70a..32c63e9b18678 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahNumberSeq.cpp @@ -26,7 +26,7 @@ #include "gc/shenandoah/shenandoahNumberSeq.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" HdrSeq::HdrSeq() { _hdr = NEW_C_HEAP_ARRAY(int*, MagBuckets, mtInternal); @@ -202,7 +202,7 @@ void BinaryMagnitudeSeq::clear() { } void BinaryMagnitudeSeq::add(size_t val) { - Atomic::add(&_sum, val); + AtomicAccess::add(&_sum, val); int mag = log2i_graceful(val) + 1; @@ -217,7 +217,7 @@ void BinaryMagnitudeSeq::add(size_t val) { mag = BitsPerSize_t - 1; } - Atomic::add(&_mags[mag], (size_t)1); + AtomicAccess::add(&_mags[mag], (size_t)1); } size_t BinaryMagnitudeSeq::level(int level) const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index 3aa3f6cb0caee..ac3107eb396ec 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -120,7 +120,7 @@ class ShenandoahPurgeSATBTask : public WorkerTask { ShenandoahProcessOldSATB processor(mark_queue); while (satb_queues.apply_closure_to_completed_buffer(&processor)) {} - Atomic::add(&_trashed_oops, processor.trashed_oops()); + AtomicAccess::add(&_trashed_oops, processor.trashed_oops()); } }; @@ -149,7 +149,7 @@ class ShenandoahTransferOldSATBTask : public WorkerTask { ShenandoahProcessOldSATB processor(mark_queue); while (_satb_queues.apply_closure_to_completed_buffer(&processor)) {} - Atomic::add(&_trashed_oops, processor.trashed_oops()); + AtomicAccess::add(&_trashed_oops, processor.trashed_oops()); } }; @@ -183,7 +183,7 @@ class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask { if (!r->oop_coalesce_and_fill(true)) { // Coalesce and fill has been preempted - Atomic::store(&_is_preempted, true); + AtomicAccess::store(&_is_preempted, true); return; } } @@ -191,7 +191,7 @@ class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask { // Value returned from is_completed() is only valid after all worker thread have terminated. 
bool is_completed() { - return !Atomic::load(&_is_preempted); + return !AtomicAccess::load(&_is_preempted); } }; @@ -240,21 +240,21 @@ void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) { void ShenandoahOldGeneration::reset_promoted_expended() { shenandoah_assert_heaplocked_or_safepoint(); - Atomic::store(&_promoted_expended, (size_t) 0); + AtomicAccess::store(&_promoted_expended, (size_t) 0); } size_t ShenandoahOldGeneration::expend_promoted(size_t increment) { shenandoah_assert_heaplocked_or_safepoint(); assert(get_promoted_expended() + increment <= get_promoted_reserve(), "Do not expend more promotion than budgeted"); - return Atomic::add(&_promoted_expended, increment); + return AtomicAccess::add(&_promoted_expended, increment); } size_t ShenandoahOldGeneration::unexpend_promoted(size_t decrement) { - return Atomic::sub(&_promoted_expended, decrement); + return AtomicAccess::sub(&_promoted_expended, decrement); } size_t ShenandoahOldGeneration::get_promoted_expended() const { - return Atomic::load(&_promoted_expended); + return AtomicAccess::load(&_promoted_expended); } bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp index 2bbce179af8d7..f8726386b5d31 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp @@ -33,7 +33,7 @@ #include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "gc/shenandoah/shenandoahUtils.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" static ReferenceType reference_type(oop reference) { return InstanceKlass::cast(reference->klass())->reference_type(); @@ -121,7 +121,7 @@ inline oop reference_coop_decode_raw(oop v) { // CompressedOops::decode method that bypasses normal oop-ness checks. 
template static HeapWord* reference_referent_raw(oop reference) { - T raw_oop = Atomic::load(reference_referent_addr(reference)); + T raw_oop = AtomicAccess::load(reference_referent_addr(reference)); return cast_from_oop(reference_coop_decode_raw(raw_oop)); } @@ -505,7 +505,7 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc if (!CompressedOops::is_null(*list)) { oop head = lrb(CompressedOops::decode_not_null(*list)); shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier); - oop prev = Atomic::xchg(&_pending_list, head); + oop prev = AtomicAccess::xchg(&_pending_list, head); set_oop_field(p, prev); if (prev == nullptr) { // First to prepend to list, record tail @@ -520,14 +520,14 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc void ShenandoahReferenceProcessor::work() { // Process discovered references uint max_workers = ShenandoahHeap::heap()->max_workers(); - uint worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1; + uint worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1; while (worker_id < max_workers) { if (UseCompressedOops) { process_references(_ref_proc_thread_locals[worker_id], worker_id); } else { process_references(_ref_proc_thread_locals[worker_id], worker_id); } - worker_id = Atomic::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1; + worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1; } } @@ -560,7 +560,7 @@ class ShenandoahReferenceProcessorTask : public WorkerTask { void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) { - Atomic::release_store_fence(&_iterate_discovered_list_id, 0U); + AtomicAccess::release_store_fence(&_iterate_discovered_list_id, 0U); // Process discovered lists ShenandoahReferenceProcessorTask task(phase, concurrent, this); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp index 8693046297d5a..a56113868be76 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp @@ -45,7 +45,7 @@ ShenandoahJavaThreadsIterator::ShenandoahJavaThreadsIterator(ShenandoahPhaseTimi } uint ShenandoahJavaThreadsIterator::claim() { - return Atomic::fetch_then_add(&_claimed, _stride, memory_order_relaxed); + return AtomicAccess::fetch_then_add(&_claimed, _stride, memory_order_relaxed); } void ShenandoahJavaThreadsIterator::threads_do(ThreadClosure* cl, uint worker_id) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp index 68bec5c2071bc..ce7cda984121a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp @@ -369,7 +369,7 @@ inline bool ShenandoahRegionChunkIterator::next(struct ShenandoahRegionChunk *as if (_index >= _total_chunks) { return false; } - size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed); + size_t new_index = AtomicAccess::add(&_index, (size_t) 1, memory_order_relaxed); if (new_index > _total_chunks) { // First worker that hits new_index == _total_chunks continues, other // contending workers return false. 
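The claiming loops touched above (ShenandoahRegionIterator::next, parallel_nmethods_do, ShenandoahJavaThreadsIterator::claim, ShenandoahRegionChunkIterator::next) all use the same idiom: workers grab disjoint index ranges with a relaxed fetch_then_add or add and bounds-check the result, so no lock is needed. A minimal sketch of that idiom with the renamed API, assuming AtomicAccess exposes the same fetch_then_add overloads as the old Atomic class; the iterator name and members are illustrative:

    #include "runtime/atomicAccess.hpp"
    #include "utilities/globalDefinitions.hpp"

    class ChunkClaimer {
      volatile size_t _next;
      const size_t    _limit;
      const size_t    _stride;
    public:
      ChunkClaimer(size_t limit, size_t stride)
        : _next(0), _limit(limit), _stride(stride) {}

      // Returns false once every chunk has been handed out. Relaxed ordering
      // suffices because the claimed index is the only shared state here.
      bool next_chunk(size_t* start, size_t* end) {
        const size_t cur = AtomicAccess::fetch_then_add(&_next, _stride, memory_order_relaxed);
        if (cur >= _limit) {
          return false;
        }
        *start = cur;
        *end   = MIN2(cur + _stride, _limit);
        return true;
      }
    };
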
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp index 1d1c93599464a..127882201d780 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp @@ -27,7 +27,7 @@ #include "gc/shenandoah/shenandoahPadding.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" typedef jbyte ShenandoahSharedValue; @@ -49,19 +49,19 @@ typedef struct ShenandoahSharedFlag { } void set() { - Atomic::release_store_fence(&value, (ShenandoahSharedValue)SET); + AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)SET); } void unset() { - Atomic::release_store_fence(&value, (ShenandoahSharedValue)UNSET); + AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)UNSET); } bool is_set() const { - return Atomic::load_acquire(&value) == SET; + return AtomicAccess::load_acquire(&value) == SET; } bool is_unset() const { - return Atomic::load_acquire(&value) == UNSET; + return AtomicAccess::load_acquire(&value) == UNSET; } void set_cond(bool val) { @@ -76,7 +76,7 @@ typedef struct ShenandoahSharedFlag { if (is_set()) { return false; } - ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET); + ShenandoahSharedValue old = AtomicAccess::cmpxchg(&value, (ShenandoahSharedValue)UNSET, (ShenandoahSharedValue)SET); return old == UNSET; // success } @@ -84,7 +84,7 @@ typedef struct ShenandoahSharedFlag { if (!is_set()) { return false; } - ShenandoahSharedValue old = Atomic::cmpxchg(&value, (ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET); + ShenandoahSharedValue old = AtomicAccess::cmpxchg(&value, (ShenandoahSharedValue)SET, (ShenandoahSharedValue)UNSET); return old == SET; // success } @@ -120,7 +120,7 @@ typedef struct ShenandoahSharedBitmap { assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask; while (true) { - ShenandoahSharedValue ov = Atomic::load_acquire(&value); + ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value); // We require all bits of mask_val to be set if ((ov & mask_val) == mask_val) { // already set @@ -128,7 +128,7 @@ typedef struct ShenandoahSharedBitmap { } ShenandoahSharedValue nv = ov | mask_val; - if (Atomic::cmpxchg(&value, ov, nv) == ov) { + if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) { // successfully set: if value returned from cmpxchg equals ov, then nv has overwritten value. return; } @@ -139,14 +139,14 @@ typedef struct ShenandoahSharedBitmap { assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); ShenandoahSharedValue mask_val = (ShenandoahSharedValue) mask; while (true) { - ShenandoahSharedValue ov = Atomic::load_acquire(&value); + ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value); if ((ov & mask_val) == 0) { // already unset return; } ShenandoahSharedValue nv = ov & ~mask_val; - if (Atomic::cmpxchg(&value, ov, nv) == ov) { + if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) { // successfully unset return; } @@ -154,7 +154,7 @@ typedef struct ShenandoahSharedBitmap { } void clear() { - Atomic::release_store_fence(&value, (ShenandoahSharedValue)0); + AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)0); } // Returns true iff any bit set in mask is set in this.value. 
@@ -165,18 +165,18 @@ typedef struct ShenandoahSharedBitmap { // Returns true iff all bits set in mask are set in this.value. bool is_set_exactly(uint mask) const { assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); - uint uvalue = Atomic::load_acquire(&value); + uint uvalue = AtomicAccess::load_acquire(&value); return (uvalue & mask) == mask; } // Returns true iff all bits set in mask are unset in this.value. bool is_unset(uint mask) const { assert (mask < (sizeof(ShenandoahSharedValue) * CHAR_MAX), "sanity"); - return (Atomic::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0; + return (AtomicAccess::load_acquire(&value) & (ShenandoahSharedValue) mask) == 0; } bool is_clear() const { - return (Atomic::load_acquire(&value)) == 0; + return (AtomicAccess::load_acquire(&value)) == 0; } void set_cond(uint mask, bool val) { @@ -224,23 +224,23 @@ struct ShenandoahSharedEnumFlag { void set(T v) { assert (v >= 0, "sanity"); assert (v < (sizeof(EnumValueType) * CHAR_MAX), "sanity"); - Atomic::release_store_fence(&value, (EnumValueType)v); + AtomicAccess::release_store_fence(&value, (EnumValueType)v); } T get() const { - return (T)Atomic::load_acquire(&value); + return (T)AtomicAccess::load_acquire(&value); } T cmpxchg(T new_value, T expected) { assert (new_value >= 0, "sanity"); assert (new_value < (sizeof(EnumValueType) * CHAR_MAX), "sanity"); - return (T)Atomic::cmpxchg(&value, (EnumValueType)expected, (EnumValueType)new_value); + return (T)AtomicAccess::cmpxchg(&value, (EnumValueType)expected, (EnumValueType)new_value); } T xchg(T new_value) { assert (new_value >= 0, "sanity"); assert (new_value < (sizeof(EnumValueType) * CHAR_MAX), "sanity"); - return (T)Atomic::xchg(&value, (EnumValueType)new_value); + return (T)AtomicAccess::xchg(&value, (EnumValueType)new_value); } volatile EnumValueType* addr_of() { @@ -273,17 +273,17 @@ typedef struct ShenandoahSharedSemaphore { ShenandoahSharedSemaphore(uint tokens) { assert(tokens <= max_tokens(), "sanity"); - Atomic::release_store_fence(&value, (ShenandoahSharedValue)tokens); + AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)tokens); } bool try_acquire() { while (true) { - ShenandoahSharedValue ov = Atomic::load_acquire(&value); + ShenandoahSharedValue ov = AtomicAccess::load_acquire(&value); if (ov == 0) { return false; } ShenandoahSharedValue nv = ov - 1; - if (Atomic::cmpxchg(&value, ov, nv) == ov) { + if (AtomicAccess::cmpxchg(&value, ov, nv) == ov) { // successfully set return true; } @@ -291,7 +291,7 @@ typedef struct ShenandoahSharedSemaphore { } void claim_all() { - Atomic::release_store_fence(&value, (ShenandoahSharedValue)0); + AtomicAccess::release_store_fence(&value, (ShenandoahSharedValue)0); } } ShenandoahSharedSemaphore; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp index af661fd1dc436..969edafbf752d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2024, Red Hat, Inc. All rights reserved. - * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc/shared/taskTerminator.hpp" #include "gc/shenandoah/shenandoahPadding.hpp" #include "nmt/memTag.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutex.hpp" #include "utilities/debug.hpp" @@ -340,7 +340,7 @@ T* ParallelClaimableQueueSet::claim_next() { return nullptr; } - jint index = Atomic::add(&_claimed_index, 1, memory_order_relaxed); + jint index = AtomicAccess::add(&_claimed_index, 1, memory_order_relaxed); if (index <= size) { return GenericTaskQueueSet::queue((uint)index - 1); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 33b8744be3d88..c84a2a656771b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -42,7 +42,7 @@ #include "memory/iterator.inline.hpp" #include "memory/resourceArea.hpp" #include "oops/compressedOops.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "runtime/threads.hpp" #include "utilities/align.hpp" @@ -193,7 +193,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { // skip break; case ShenandoahVerifier::_verify_liveness_complete: - Atomic::add(&_ld[obj_reg->index()], (uint) ShenandoahForwarding::size(obj), memory_order_relaxed); + AtomicAccess::add(&_ld[obj_reg->index()], (uint) ShenandoahForwarding::size(obj), memory_order_relaxed); // fallthrough for fast failure for un-live regions: case ShenandoahVerifier::_verify_liveness_conservative: check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live() || @@ -622,7 +622,7 @@ class ShenandoahVerifierReachableTask : public WorkerTask { } } - Atomic::add(&_processed, processed, memory_order_relaxed); + AtomicAccess::add(&_processed, processed, memory_order_relaxed); } }; @@ -669,7 +669,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { }; size_t processed() { - return Atomic::load(&_processed); + return AtomicAccess::load(&_processed); } void work(uint worker_id) override { @@ -684,7 +684,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { _options); while (true) { - size_t v = Atomic::fetch_then_add(&_claimed, 1u, memory_order_relaxed); + size_t v = AtomicAccess::fetch_then_add(&_claimed, 1u, memory_order_relaxed); if (v < _heap->num_regions()) { ShenandoahHeapRegion* r = _heap->get_region(v); if (!in_generation(r)) { @@ -712,7 +712,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { if (_heap->gc_generation()->complete_marking_context()->is_marked(cast_to_oop(obj))) { verify_and_follow(obj, stack, cl, &processed); } - Atomic::add(&_processed, processed, memory_order_relaxed); + AtomicAccess::add(&_processed, processed, memory_order_relaxed); } virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) { @@ -745,7 +745,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { } } - Atomic::add(&_processed, processed, memory_order_relaxed); + AtomicAccess::add(&_processed, processed, memory_order_relaxed); } void verify_and_follow(HeapWord *addr, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl, size_t *processed) { @@ -1023,12 +1023,12 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, if (r->is_humongous()) { // For humongous objects, test if 
start region is marked live, and if so, // all humongous regions in that chain have live data equal to their "used". - juint start_live = Atomic::load(&ld[r->humongous_start_region()->index()]); + juint start_live = AtomicAccess::load(&ld[r->humongous_start_region()->index()]); if (start_live > 0) { verf_live = (juint)(r->used() / HeapWordSize); } } else { - verf_live = Atomic::load(&ld[r->index()]); + verf_live = AtomicAccess::load(&ld[r->index()]); } size_t reg_live = r->get_live_data_words(); diff --git a/src/hotspot/share/gc/z/zAbort.cpp b/src/hotspot/share/gc/z/zAbort.cpp index 7e4f23c1c7a8a..3310793f73023 100644 --- a/src/hotspot/share/gc/z/zAbort.cpp +++ b/src/hotspot/share/gc/z/zAbort.cpp @@ -22,10 +22,10 @@ */ #include "gc/z/zAbort.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" volatile bool ZAbort::_should_abort = false; void ZAbort::abort() { - Atomic::store(&_should_abort, true); + AtomicAccess::store(&_should_abort, true); } diff --git a/src/hotspot/share/gc/z/zAbort.inline.hpp b/src/hotspot/share/gc/z/zAbort.inline.hpp index 0037f7ec4488d..37503e25f7063 100644 --- a/src/hotspot/share/gc/z/zAbort.inline.hpp +++ b/src/hotspot/share/gc/z/zAbort.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,10 @@ #include "gc/z/zAbort.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline bool ZAbort::should_abort() { - return Atomic::load(&_should_abort); + return AtomicAccess::load(&_should_abort); } #endif // SHARE_GC_Z_ZABORT_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zAddress.inline.hpp b/src/hotspot/share/gc/z/zAddress.inline.hpp index 98f6900cbf0bf..64beb5ba35d6f 100644 --- a/src/hotspot/share/gc/z/zAddress.inline.hpp +++ b/src/hotspot/share/gc/z/zAddress.inline.hpp @@ -30,7 +30,7 @@ #include "gc/z/zGlobals.hpp" #include "oops/oop.hpp" #include "oops/oopsHierarchy.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/align.hpp" #include "utilities/checkedCast.hpp" #include "utilities/globalDefinitions.hpp" @@ -475,7 +475,7 @@ inline uintptr_t untype(zaddress addr) { inline void dereferenceable_test(zaddress addr) { if (ZVerifyOops && !is_null(addr)) { // Intentionally crash if the address is pointing into unmapped memory - (void)Atomic::load((int*)(uintptr_t)addr); + (void)AtomicAccess::load((int*)(uintptr_t)addr); } } #endif diff --git a/src/hotspot/share/gc/z/zArray.hpp b/src/hotspot/share/gc/z/zArray.hpp index 9ef911bb1b56f..2b728bdf20810 100644 --- a/src/hotspot/share/gc/z/zArray.hpp +++ b/src/hotspot/share/gc/z/zArray.hpp @@ -25,7 +25,7 @@ #define SHARE_GC_Z_ZARRAY_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "runtime/thread.hpp" #include "utilities/growableArray.hpp" diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp index bf606a88e68ec..9e2bc19118e06 100644 --- a/src/hotspot/share/gc/z/zArray.inline.hpp +++ b/src/hotspot/share/gc/z/zArray.inline.hpp @@ -27,7 +27,7 @@ #include "gc/z/zArray.hpp" #include "gc/z/zLock.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" template ZArraySlice::ZArraySlice(T* data, int len) @@ -130,7 +130,7 
@@ inline bool ZArrayIteratorImpl::next_serial(size_t* index) { template inline bool ZArrayIteratorImpl::next_parallel(size_t* index) { - const size_t claimed_index = Atomic::fetch_then_add(&_next, 1u, memory_order_relaxed); + const size_t claimed_index = AtomicAccess::fetch_then_add(&_next, 1u, memory_order_relaxed); if (claimed_index < _end) { *index = claimed_index; diff --git a/src/hotspot/share/gc/z/zBarrier.inline.hpp b/src/hotspot/share/gc/z/zBarrier.inline.hpp index a3eb7a9ca67d6..b5923f0162837 100644 --- a/src/hotspot/share/gc/z/zBarrier.inline.hpp +++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ #include "gc/z/zResurrection.inline.hpp" #include "gc/z/zVerify.hpp" #include "oops/oop.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" // A self heal must always "upgrade" the address metadata bits in // accordance with the metadata bits state machine. The following @@ -93,7 +93,7 @@ inline void ZBarrier::self_heal(ZBarrierFastPath fast_path, volatile zpointer* p assert_transition_monotonicity(ptr, heal_ptr); // Heal - const zpointer prev_ptr = Atomic::cmpxchg(p, ptr, heal_ptr, memory_order_relaxed); + const zpointer prev_ptr = AtomicAccess::cmpxchg(p, ptr, heal_ptr, memory_order_relaxed); if (prev_ptr == ptr) { // Success return; @@ -365,7 +365,7 @@ inline void ZBarrier::remap_young_relocated(volatile zpointer* p, zpointer o) { } inline zpointer ZBarrier::load_atomic(volatile zpointer* p) { - const zpointer ptr = Atomic::load(p); + const zpointer ptr = AtomicAccess::load(p); assert_is_valid(ptr); return ptr; } diff --git a/src/hotspot/share/gc/z/zBarrierSet.cpp b/src/hotspot/share/gc/z/zBarrierSet.cpp index 43ea791a260ba..c71c404712ca1 100644 --- a/src/hotspot/share/gc/z/zBarrierSet.cpp +++ b/src/hotspot/share/gc/z/zBarrierSet.cpp @@ -163,7 +163,7 @@ void ZBarrierSet::clone_obj_array(objArrayOop src_obj, objArrayOop dst_obj) { // We avoid healing here because the store below colors the pointer store good, // hence avoiding the cost of a CAS. ZBarrier::store_barrier_on_heap_oop_field(dst, false /* heal */); - Atomic::store(dst, ZAddress::store_good(elem)); + AtomicAccess::store(dst, ZAddress::store_good(elem)); } } diff --git a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp index f7baf85efbfa1..16f2a303cb2e5 100644 --- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp +++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -332,7 +332,7 @@ template inline void ZBarrierSet::AccessBarrier::oop_copy_one(zpointer* dst, zpointer* src) { const zaddress obj = oop_copy_one_barriers(dst, src); - Atomic::store(dst, ZAddress::store_good(obj)); + AtomicAccess::store(dst, ZAddress::store_good(obj)); } template @@ -344,7 +344,7 @@ inline bool ZBarrierSet::AccessBarrier::oop_copy_one_ch return false; } - Atomic::store(dst, ZAddress::store_good(obj)); + AtomicAccess::store(dst, ZAddress::store_good(obj)); return true; } @@ -409,7 +409,7 @@ class ZColorStoreGoodOopClosure : public BasicOopIterateClosure { volatile zpointer* const p = (volatile zpointer*)p_; const zpointer ptr = ZBarrier::load_atomic(p); const zaddress addr = ZPointer::uncolor(ptr); - Atomic::store(p, ZAddress::store_good(addr)); + AtomicAccess::store(p, ZAddress::store_good(addr)); } virtual void do_oop(narrowOop* p) { diff --git a/src/hotspot/share/gc/z/zBitMap.inline.hpp b/src/hotspot/share/gc/z/zBitMap.inline.hpp index 2b5ad91932b00..4cd2827bf1a72 100644 --- a/src/hotspot/share/gc/z/zBitMap.inline.hpp +++ b/src/hotspot/share/gc/z/zBitMap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/z/zBitMap.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/debug.hpp" @@ -70,7 +70,7 @@ inline bool ZBitMap::par_set_bit_pair_strong(idx_t bit, bool& inc_live) { inc_live = false; return false; } - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val); + const bm_word_t cur_val = AtomicAccess::cmpxchg(addr, old_val, new_val); if (cur_val == old_val) { // Success const bm_word_t marked_mask = bit_mask(bit); diff --git a/src/hotspot/share/gc/z/zContinuation.cpp b/src/hotspot/share/gc/z/zContinuation.cpp index 6c02f64528c28..35e40dabf7934 100644 --- a/src/hotspot/share/gc/z/zContinuation.cpp +++ b/src/hotspot/share/gc/z/zContinuation.cpp @@ -25,7 +25,7 @@ #include "gc/z/zBarrier.inline.hpp" #include "gc/z/zContinuation.inline.hpp" #include "gc/z/zStackChunkGCData.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" static zpointer materialize_zpointer(stackChunkOop chunk, void* addr) { volatile uintptr_t* const value_addr = (volatile uintptr_t*)addr; @@ -46,7 +46,7 @@ static zpointer materialize_zpointer(stackChunkOop chunk, void* addr) { // load the oop once and perform all checks on that loaded copy. 
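The ZBitMap::par_set_bit_pair_strong hunk above shows the usual compare-exchange retry loop for setting bits in a shared bitmap word while detecting which thread actually performed the transition. A simplified standalone sketch of that loop using std::atomic (the real code additionally tracks the paired mark/finalizable bits and the inc_live result):

#include <atomic>
#include <cstdint>

// Atomically ORs `mask` into *word and returns true iff this caller changed
// at least one of those bits from 0 to 1, i.e. won the race to set them.
inline bool par_set_bits(std::atomic<uint64_t>* word, uint64_t mask) {
  uint64_t old_val = word->load(std::memory_order_relaxed);
  for (;;) {
    if ((old_val & mask) == mask) {
      return false;                               // Already set by someone else
    }
    const uint64_t new_val = old_val | mask;
    if (word->compare_exchange_weak(old_val, new_val,
                                    std::memory_order_relaxed)) {
      return true;                                // Success
    }
    // compare_exchange_weak reloaded old_val on failure; retry
  }
}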
// Load once - const uintptr_t value = Atomic::load(value_addr); + const uintptr_t value = AtomicAccess::load(value_addr); if ((value & ~ZPointerAllMetadataMask) == 0) { // Must be null of some sort - either zaddress or zpointer diff --git a/src/hotspot/share/gc/z/zForwarding.cpp b/src/hotspot/share/gc/z/zForwarding.cpp index df0e986d2c596..820bb9dbc352d 100644 --- a/src/hotspot/share/gc/z/zForwarding.cpp +++ b/src/hotspot/share/gc/z/zForwarding.cpp @@ -29,7 +29,7 @@ #include "gc/z/zStat.hpp" #include "gc/z/zUtils.inline.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/align.hpp" // @@ -50,7 +50,7 @@ // bool ZForwarding::claim() { - return Atomic::cmpxchg(&_claimed, false, true) == false; + return AtomicAccess::cmpxchg(&_claimed, false, true) == false; } void ZForwarding::in_place_relocation_start(zoffset relocated_watermark) { @@ -60,7 +60,7 @@ void ZForwarding::in_place_relocation_start(zoffset relocated_watermark) { // Support for ZHeap::is_in checks of from-space objects // in a page that is in-place relocating - Atomic::store(&_in_place_thread, Thread::current()); + AtomicAccess::store(&_in_place_thread, Thread::current()); _in_place_top_at_start = _page->top(); } @@ -76,17 +76,17 @@ void ZForwarding::in_place_relocation_finish() { } // Disable relaxed ZHeap::is_in checks - Atomic::store(&_in_place_thread, (Thread*)nullptr); + AtomicAccess::store(&_in_place_thread, (Thread*)nullptr); } bool ZForwarding::in_place_relocation_is_below_top_at_start(zoffset offset) const { // Only the relocating thread is allowed to know about the old relocation top. - return Atomic::load(&_in_place_thread) == Thread::current() && offset < _in_place_top_at_start; + return AtomicAccess::load(&_in_place_thread) == Thread::current() && offset < _in_place_top_at_start; } bool ZForwarding::retain_page(ZRelocateQueue* queue) { for (;;) { - const int32_t ref_count = Atomic::load_acquire(&_ref_count); + const int32_t ref_count = AtomicAccess::load_acquire(&_ref_count); if (ref_count == 0) { // Released @@ -101,7 +101,7 @@ bool ZForwarding::retain_page(ZRelocateQueue* queue) { return false; } - if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) == ref_count) { + if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count + 1) == ref_count) { // Retained return true; } @@ -110,11 +110,11 @@ bool ZForwarding::retain_page(ZRelocateQueue* queue) { void ZForwarding::in_place_relocation_claim_page() { for (;;) { - const int32_t ref_count = Atomic::load(&_ref_count); + const int32_t ref_count = AtomicAccess::load(&_ref_count); assert(ref_count > 0, "Invalid state"); // Invert reference count - if (Atomic::cmpxchg(&_ref_count, ref_count, -ref_count) != ref_count) { + if (AtomicAccess::cmpxchg(&_ref_count, ref_count, -ref_count) != ref_count) { continue; } @@ -122,7 +122,7 @@ void ZForwarding::in_place_relocation_claim_page() { // and we have now claimed the page. Otherwise we wait until it is claimed. 
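The retain_page/release_page hunks above implement a reference count whose sign doubles as a state: zero means the page has been released, a positive value counts users, and a negative value means the page has been claimed for in-place relocation. A reduced sketch of the retain/release half of that protocol with std::atomic (the negative claim, the lock and the wait/notify handshake are left out):

#include <atomic>
#include <cstdint>

class PageRef {
  std::atomic<int32_t> _ref_count{1};

public:
  // Only succeeds while the count is positive, mirroring retain_page().
  bool retain() {
    int32_t count = _ref_count.load(std::memory_order_acquire);
    for (;;) {
      if (count <= 0) {
        return false;                        // Released or claimed in-place
      }
      if (_ref_count.compare_exchange_weak(count, count + 1,
                                           std::memory_order_acquire)) {
        return true;                         // Retained
      }
    }
  }

  // Returns true when this call dropped the count to zero (last user),
  // which is where the real code wakes up detach_page() waiters.
  bool release() {
    return _ref_count.fetch_sub(1, std::memory_order_release) == 1;
  }
};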
if (ref_count != 1) { ZLocker locker(&_ref_lock); - while (Atomic::load_acquire(&_ref_count) != -1) { + while (AtomicAccess::load_acquire(&_ref_count) != -1) { _ref_lock.wait(); } } @@ -134,12 +134,12 @@ void ZForwarding::in_place_relocation_claim_page() { void ZForwarding::release_page() { for (;;) { - const int32_t ref_count = Atomic::load(&_ref_count); + const int32_t ref_count = AtomicAccess::load(&_ref_count); assert(ref_count != 0, "Invalid state"); if (ref_count > 0) { // Decrement reference count - if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count - 1) != ref_count) { + if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count - 1) != ref_count) { continue; } @@ -152,7 +152,7 @@ void ZForwarding::release_page() { } } else { // Increment reference count - if (Atomic::cmpxchg(&_ref_count, ref_count, ref_count + 1) != ref_count) { + if (AtomicAccess::cmpxchg(&_ref_count, ref_count, ref_count + 1) != ref_count) { continue; } @@ -171,9 +171,9 @@ void ZForwarding::release_page() { ZPage* ZForwarding::detach_page() { // Wait until released - if (Atomic::load_acquire(&_ref_count) != 0) { + if (AtomicAccess::load_acquire(&_ref_count) != 0) { ZLocker locker(&_ref_lock); - while (Atomic::load_acquire(&_ref_count) != 0) { + while (AtomicAccess::load_acquire(&_ref_count) != 0) { _ref_lock.wait(); } } @@ -182,16 +182,16 @@ ZPage* ZForwarding::detach_page() { } ZPage* ZForwarding::page() { - assert(Atomic::load(&_ref_count) != 0, "The page has been released/detached"); + assert(AtomicAccess::load(&_ref_count) != 0, "The page has been released/detached"); return _page; } void ZForwarding::mark_done() { - Atomic::store(&_done, true); + AtomicAccess::store(&_done, true); } bool ZForwarding::is_done() const { - return Atomic::load(&_done); + return AtomicAccess::load(&_done); } // @@ -288,7 +288,7 @@ void ZForwarding::relocated_remembered_fields_publish() { // used to have remembered set entries. Now publish the fields to // the YC. 
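The publication code around this point drives a small state machine entirely through cmpxchg: the old collection tries none -> published, the young collection tries none -> reject (and published -> reject when it arrives late), and the returned old value tells each side which transition actually happened. A standalone sketch of that idiom, assuming a simplified PublishState enum:

#include <atomic>

enum class PublishState { none, published, reject, accept };

// Attempts the transition `from` -> `to` and returns the state that was
// actually observed: equal to `from` on success, the winning state otherwise,
// just like the cmpxchg calls in these hunks return the previous value.
inline PublishState try_transition(std::atomic<PublishState>* state,
                                   PublishState from,
                                   PublishState to) {
  PublishState observed = from;
  state->compare_exchange_strong(observed, to, std::memory_order_acq_rel);
  return observed;
}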
- const ZPublishState res = Atomic::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::published); + const ZPublishState res = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::published); // none: OK to publish // published: Not possible - this operation makes this transition @@ -319,7 +319,7 @@ void ZForwarding::relocated_remembered_fields_notify_concurrent_scan_of() { // Invariant: The page is being retained assert(ZGeneration::young()->is_phase_mark(), "Only called when"); - const ZPublishState res = Atomic::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::reject); + const ZPublishState res = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::none, ZPublishState::reject); // none: OC has not completed relocation // published: OC has completed and published all relocated remembered fields @@ -340,7 +340,7 @@ void ZForwarding::relocated_remembered_fields_notify_concurrent_scan_of() { // OC relocation already collected and published fields // Still notify concurrent scanning and reject the collected data from the OC - const ZPublishState res2 = Atomic::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::published, ZPublishState::reject); + const ZPublishState res2 = AtomicAccess::cmpxchg(&_relocated_remembered_fields_state, ZPublishState::published, ZPublishState::reject); assert(res2 == ZPublishState::published, "Should not fail"); log_debug(gc, remset)("Forwarding remset eager and reject: " PTR_FORMAT " " PTR_FORMAT, untype(start()), untype(end())); diff --git a/src/hotspot/share/gc/z/zForwarding.inline.hpp b/src/hotspot/share/gc/z/zForwarding.inline.hpp index 43558018793ff..45b5d495e797d 100644 --- a/src/hotspot/share/gc/z/zForwarding.inline.hpp +++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp @@ -36,7 +36,7 @@ #include "gc/z/zPage.inline.hpp" #include "gc/z/zUtils.inline.hpp" #include "gc/z/zVirtualMemory.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/powerOfTwo.hpp" @@ -196,7 +196,7 @@ void ZForwarding::oops_do_in_forwarded_via_table(Function function) { } inline bool ZForwarding::in_place_relocation() const { - assert(Atomic::load(&_ref_count) != 0, "The page has been released/detached"); + assert(AtomicAccess::load(&_ref_count) != 0, "The page has been released/detached"); return _in_place; } @@ -207,7 +207,7 @@ inline ZForwardingEntry* ZForwarding::entries() const { inline ZForwardingEntry ZForwarding::at(ZForwardingCursor* cursor) const { // Load acquire for correctness with regards to // accesses to the contents of the forwarded object. 
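ZForwarding::at above pairs a load_acquire with the release fence + relaxed cmpxchg used when an entry is inserted: the payload is written plainly, a release fence orders it before the CAS that publishes the entry, and readers that acquire-load a populated entry are guaranteed to see the payload. A standalone sketch of that publish/consume pairing with std::atomic, assuming a single publishing thread per entry (the real code retries at another cursor when the CAS loses):

#include <atomic>
#include <cstdint>

struct Entry {
  uint64_t          payload;            // Plain field, published via the flag
  std::atomic<bool> populated{false};
};

// Writer: plain-store the payload, then a release fence, then a relaxed CAS
// that flips the flag, mirroring OrderAccess::release() + relaxed cmpxchg.
inline bool publish(Entry* e, uint64_t value) {
  e->payload = value;
  std::atomic_thread_fence(std::memory_order_release);
  bool expected = false;
  return e->populated.compare_exchange_strong(expected, true,
                                              std::memory_order_relaxed);
}

// Reader: an acquire load of the flag makes the payload write visible
// whenever the flag is observed as set.
inline bool read_entry(const Entry* e, uint64_t* out) {
  if (e->populated.load(std::memory_order_acquire)) {
    *out = e->payload;
    return true;
  }
  return false;
}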
- return Atomic::load_acquire(entries() + *cursor); + return AtomicAccess::load_acquire(entries() + *cursor); } inline ZForwardingEntry ZForwarding::first(uintptr_t from_index, ZForwardingCursor* cursor) const { @@ -273,7 +273,7 @@ inline zoffset ZForwarding::insert(uintptr_t from_index, zoffset to_offset, ZFor OrderAccess::release(); for (;;) { - const ZForwardingEntry prev_entry = Atomic::cmpxchg(entries() + *cursor, old_entry, new_entry, memory_order_relaxed); + const ZForwardingEntry prev_entry = AtomicAccess::cmpxchg(entries() + *cursor, old_entry, new_entry, memory_order_relaxed); if (!prev_entry.populated()) { // Success return to_offset; @@ -307,7 +307,7 @@ inline void ZForwarding::relocated_remembered_fields_register(volatile zpointer* // Invariant: Page is being retained assert(ZGeneration::young()->is_phase_mark(), "Only called when"); - const ZPublishState res = Atomic::load(&_relocated_remembered_fields_state); + const ZPublishState res = AtomicAccess::load(&_relocated_remembered_fields_state); // none: Gather remembered fields // published: Have already published fields - not possible since they haven't been @@ -327,7 +327,7 @@ inline void ZForwarding::relocated_remembered_fields_register(volatile zpointer* // Returns true iff the page is being (or about to be) relocated by the OC // while the YC gathered the remembered fields of the "from" page. inline bool ZForwarding::relocated_remembered_fields_is_concurrently_scanned() const { - return Atomic::load(&_relocated_remembered_fields_state) == ZPublishState::reject; + return AtomicAccess::load(&_relocated_remembered_fields_state) == ZPublishState::reject; } template @@ -335,7 +335,7 @@ inline void ZForwarding::relocated_remembered_fields_apply_to_published(Function // Invariant: Page is not being retained assert(ZGeneration::young()->is_phase_mark(), "Only called when"); - const ZPublishState res = Atomic::load_acquire(&_relocated_remembered_fields_state); + const ZPublishState res = AtomicAccess::load_acquire(&_relocated_remembered_fields_state); // none: Nothing published - page had already been relocated before YC started // published: OC relocated and published relocated remembered fields @@ -363,14 +363,14 @@ inline void ZForwarding::relocated_remembered_fields_apply_to_published(Function // collection. Mark that it is unsafe (and unnecessary) to call scan_page // on the page in the page table. assert(res != ZPublishState::accept, "Unexpected"); - Atomic::store(&_relocated_remembered_fields_state, ZPublishState::reject); + AtomicAccess::store(&_relocated_remembered_fields_state, ZPublishState::reject); } else { log_debug(gc, remset)("scan_forwarding failed retain safe " PTR_FORMAT, untype(start())); // Guaranteed that the page was fully relocated and removed from page table. // Because of this we can signal to scan_page that any page found in page table // of the same slot as the current forwarding is a page that is safe to scan, // and in fact must be scanned. - Atomic::store(&_relocated_remembered_fields_state, ZPublishState::accept); + AtomicAccess::store(&_relocated_remembered_fields_state, ZPublishState::accept); } } diff --git a/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp b/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp index 42d006a5b3734..dc71507175c74 100644 --- a/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp +++ b/src/hotspot/share/gc/z/zForwardingAllocator.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/z/zForwardingAllocator.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" inline size_t ZForwardingAllocator::size() const { @@ -38,7 +38,7 @@ inline bool ZForwardingAllocator::is_full() const { } inline void* ZForwardingAllocator::alloc(size_t size) { - char* const addr = Atomic::fetch_then_add(&_top, size); + char* const addr = AtomicAccess::fetch_then_add(&_top, size); assert(addr + size <= _end, "Allocation should never fail"); return addr; } diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp index 27697be68375e..4a17e80d665bb 100644 --- a/src/hotspot/share/gc/z/zGeneration.cpp +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -56,7 +56,7 @@ #include "logging/log.hpp" #include "memory/universe.hpp" #include "prims/jvmtiTagMap.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/handshake.hpp" #include "runtime/safepoint.hpp" @@ -287,7 +287,7 @@ size_t ZGeneration::freed() const { } void ZGeneration::increase_freed(size_t size) { - Atomic::add(&_freed, size, memory_order_relaxed); + AtomicAccess::add(&_freed, size, memory_order_relaxed); } size_t ZGeneration::promoted() const { @@ -295,7 +295,7 @@ size_t ZGeneration::promoted() const { } void ZGeneration::increase_promoted(size_t size) { - Atomic::add(&_promoted, size, memory_order_relaxed); + AtomicAccess::add(&_promoted, size, memory_order_relaxed); } size_t ZGeneration::compacted() const { @@ -303,7 +303,7 @@ size_t ZGeneration::compacted() const { } void ZGeneration::increase_compacted(size_t size) { - Atomic::add(&_compacted, size, memory_order_relaxed); + AtomicAccess::add(&_compacted, size, memory_order_relaxed); } ConcurrentGCTimer* ZGeneration::gc_timer() const { diff --git a/src/hotspot/share/gc/z/zGranuleMap.inline.hpp b/src/hotspot/share/gc/z/zGranuleMap.inline.hpp index 21be6a3d80a86..535a9dfc15a55 100644 --- a/src/hotspot/share/gc/z/zGranuleMap.inline.hpp +++ b/src/hotspot/share/gc/z/zGranuleMap.inline.hpp @@ -30,7 +30,7 @@ #include "gc/z/zArray.inline.hpp" #include "gc/z/zGlobals.hpp" #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" @@ -57,7 +57,7 @@ inline size_t ZGranuleMap::index_for_offset(zoffset offset) const { template inline T ZGranuleMap::at(size_t index) const { assert(index < _size, "Invalid index"); - return Atomic::load(_map + index); + return AtomicAccess::load(_map + index); } template @@ -69,7 +69,7 @@ inline T ZGranuleMap::get(zoffset offset) const { template inline void ZGranuleMap::put(zoffset offset, T value) { const size_t index = index_for_offset(offset); - Atomic::store(_map + index, value); + AtomicAccess::store(_map + index, value); } template @@ -79,20 +79,20 @@ inline void ZGranuleMap::put(zoffset offset, size_t size, T value) { const size_t start_index = index_for_offset(offset); const size_t end_index = start_index + (size >> ZGranuleSizeShift); for (size_t index = start_index; index < end_index; index++) { - Atomic::store(_map + index, value); + AtomicAccess::store(_map + index, value); } } template inline T ZGranuleMap::get_acquire(zoffset offset) const { const 
size_t index = index_for_offset(offset); - return Atomic::load_acquire(_map + index); + return AtomicAccess::load_acquire(_map + index); } template inline void ZGranuleMap::release_put(zoffset offset, T value) { const size_t index = index_for_offset(offset); - Atomic::release_store(_map + index, value); + AtomicAccess::release_store(_map + index, value); } template diff --git a/src/hotspot/share/gc/z/zHeapIterator.cpp b/src/hotspot/share/gc/z/zHeapIterator.cpp index 63bede6143ba1..8e423fe0fca84 100644 --- a/src/hotspot/share/gc/z/zHeapIterator.cpp +++ b/src/hotspot/share/gc/z/zHeapIterator.cpp @@ -144,7 +144,7 @@ class ZHeapIteratorUncoloredRootOopClosure : public OopClosure { const ZHeapIteratorContext& _context; oop load_oop(oop* p) { - const oop o = Atomic::load(p); + const oop o = AtomicAccess::load(p); check_is_valid_zaddress(o); return RawAccess<>::oop_load(p); } diff --git a/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp b/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp index 163dd2ad685ae..216bd40a84a9e 100644 --- a/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp +++ b/src/hotspot/share/gc/z/zIndexDistributor.inline.hpp @@ -28,7 +28,7 @@ #include "gc/shared/gc_globals.hpp" #include "gc/z/zGlobals.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "runtime/thread.hpp" #include "utilities/align.hpp" @@ -45,7 +45,7 @@ class ZIndexDistributorStriped : public CHeapObj { char _mem[MemSize + ZCacheLineSize]; int claim_stripe() { - return Atomic::fetch_then_add(&_claim_stripe, 1, memory_order_relaxed); + return AtomicAccess::fetch_then_add(&_claim_stripe, 1, memory_order_relaxed); } volatile int* claim_addr(int index) { @@ -66,7 +66,7 @@ class ZIndexDistributorStriped : public CHeapObj { // Use claiming for (int i; (i = claim_stripe()) < StripeCount;) { - for (int index; (index = Atomic::fetch_then_add(claim_addr(i), 1, memory_order_relaxed)) < stripe_max;) { + for (int index; (index = AtomicAccess::fetch_then_add(claim_addr(i), 1, memory_order_relaxed)) < stripe_max;) { if (!function(i * stripe_max + index)) { return; } @@ -75,7 +75,7 @@ class ZIndexDistributorStriped : public CHeapObj { // Use stealing for (int i = 0; i < StripeCount; i++) { - for (int index; (index = Atomic::fetch_then_add(claim_addr(i), 1, memory_order_relaxed)) < stripe_max;) { + for (int index; (index = AtomicAccess::fetch_then_add(claim_addr(i), 1, memory_order_relaxed)) < stripe_max;) { if (!function(i * stripe_max + index)) { return; } @@ -172,7 +172,7 @@ class ZIndexDistributorClaimTree : public CHeapObj { // Claim functions int claim(int index) { - return Atomic::fetch_then_add(&_claim_array[index], 1, memory_order_relaxed); + return AtomicAccess::fetch_then_add(&_claim_array[index], 1, memory_order_relaxed); } int claim_at(int* indices, int level) { diff --git a/src/hotspot/share/gc/z/zJNICritical.cpp b/src/hotspot/share/gc/z/zJNICritical.cpp index 608b8f2249313..01c0a0dd6b82c 100644 --- a/src/hotspot/share/gc/z/zJNICritical.cpp +++ b/src/hotspot/share/gc/z/zJNICritical.cpp @@ -24,7 +24,7 @@ #include "gc/z/zJNICritical.hpp" #include "gc/z/zLock.inline.hpp" #include "gc/z/zStat.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/thread.inline.hpp" #include "utilities/debug.hpp" @@ -56,12 +56,12 @@ void ZJNICritical::initialize() { void ZJNICritical::block() { for (;;) { - const int64_t count = Atomic::load_acquire(&_count); + const int64_t count = 
AtomicAccess::load_acquire(&_count); if (count < 0) { // Already blocked, wait until unblocked ZLocker locker(_lock); - while (Atomic::load_acquire(&_count) < 0) { + while (AtomicAccess::load_acquire(&_count) < 0) { _lock->wait(); } @@ -70,7 +70,7 @@ void ZJNICritical::block() { } // Increment and invert count - if (Atomic::cmpxchg(&_count, count, -(count + 1)) != count) { + if (AtomicAccess::cmpxchg(&_count, count, -(count + 1)) != count) { continue; } @@ -80,7 +80,7 @@ void ZJNICritical::block() { if (count != 0) { // Wait until blocked ZLocker locker(_lock); - while (Atomic::load_acquire(&_count) != -1) { + while (AtomicAccess::load_acquire(&_count) != -1) { _lock->wait(); } } @@ -91,18 +91,18 @@ void ZJNICritical::block() { } void ZJNICritical::unblock() { - const int64_t count = Atomic::load_acquire(&_count); + const int64_t count = AtomicAccess::load_acquire(&_count); assert(count == -1, "Invalid count"); // Notify unblocked ZLocker locker(_lock); - Atomic::release_store(&_count, (int64_t)0); + AtomicAccess::release_store(&_count, (int64_t)0); _lock->notify_all(); } void ZJNICritical::enter_inner(JavaThread* thread) { for (;;) { - const int64_t count = Atomic::load_acquire(&_count); + const int64_t count = AtomicAccess::load_acquire(&_count); if (count < 0) { // Wait until unblocked @@ -112,7 +112,7 @@ void ZJNICritical::enter_inner(JavaThread* thread) { ThreadBlockInVM tbivm(thread); ZLocker locker(_lock); - while (Atomic::load_acquire(&_count) < 0) { + while (AtomicAccess::load_acquire(&_count) < 0) { _lock->wait(); } @@ -121,7 +121,7 @@ void ZJNICritical::enter_inner(JavaThread* thread) { } // Increment count - if (Atomic::cmpxchg(&_count, count, count + 1) != count) { + if (AtomicAccess::cmpxchg(&_count, count, count + 1) != count) { continue; } @@ -142,17 +142,17 @@ void ZJNICritical::enter(JavaThread* thread) { void ZJNICritical::exit_inner() { for (;;) { - const int64_t count = Atomic::load_acquire(&_count); + const int64_t count = AtomicAccess::load_acquire(&_count); assert(count != 0, "Invalid count"); if (count > 0) { // No block in progress, decrement count - if (Atomic::cmpxchg(&_count, count, count - 1) != count) { + if (AtomicAccess::cmpxchg(&_count, count, count - 1) != count) { continue; } } else { // Block in progress, increment count - if (Atomic::cmpxchg(&_count, count, count + 1) != count) { + if (AtomicAccess::cmpxchg(&_count, count, count + 1) != count) { continue; } diff --git a/src/hotspot/share/gc/z/zLiveMap.cpp b/src/hotspot/share/gc/z/zLiveMap.cpp index 18b44dacab9f7..c4ae41b08739a 100644 --- a/src/hotspot/share/gc/z/zLiveMap.cpp +++ b/src/hotspot/share/gc/z/zLiveMap.cpp @@ -27,7 +27,7 @@ #include "gc/z/zStat.hpp" #include "gc/z/zUtils.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/powerOfTwo.hpp" #include "utilities/spinYield.hpp" @@ -60,13 +60,13 @@ void ZLiveMap::reset(ZGenerationId id) { // Multiple threads can enter here, make sure only one of them // resets the marking information while the others busy wait. 
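The ZJNICritical hunks above encode two pieces of information in one signed counter: a non-negative value is the number of threads currently inside the critical region, while a negative value v means a block is in progress with -(v + 1) threads still left to exit. A reduced sketch of just the CAS transitions (the monitor-based waiting, unblock and exit paths are omitted; the names are illustrative):

#include <atomic>
#include <cstdint>

class CriticalCounter {
  std::atomic<int64_t> _count{0};

public:
  // Mutator side: enter unless a block is in progress.
  bool try_enter() {
    int64_t count = _count.load(std::memory_order_acquire);
    for (;;) {
      if (count < 0) {
        return false;                                // Blocked; caller waits
      }
      if (_count.compare_exchange_weak(count, count + 1,
                                       std::memory_order_acquire)) {
        return true;
      }
    }
  }

  // GC side: install the blocked encoding. On success, *threads_inside tells
  // how many threads must still exit before the block is complete; on a lost
  // race the caller simply retries, as in the loop above.
  bool try_block(int64_t* threads_inside) {
    int64_t count = _count.load(std::memory_order_acquire);
    if (count < 0) {
      return false;                                  // Already blocked
    }
    if (_count.compare_exchange_strong(count, -(count + 1),
                                       std::memory_order_acq_rel)) {
      *threads_inside = count;
      return true;
    }
    return false;
  }
};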
- for (uint32_t seqnum = Atomic::load_acquire(&_seqnum); + for (uint32_t seqnum = AtomicAccess::load_acquire(&_seqnum); seqnum != generation->seqnum(); - seqnum = Atomic::load_acquire(&_seqnum)) { + seqnum = AtomicAccess::load_acquire(&_seqnum)) { if (seqnum != seqnum_initializing) { // No one has claimed initialization of the livemap yet - if (Atomic::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum) { + if (AtomicAccess::cmpxchg(&_seqnum, seqnum, seqnum_initializing) == seqnum) { // This thread claimed the initialization // Reset marking information @@ -87,7 +87,7 @@ void ZLiveMap::reset(ZGenerationId id) { // before the update of the page seqnum, such that when the // up-to-date seqnum is load acquired, the bit maps will not // contain stale information. - Atomic::release_store(&_seqnum, generation->seqnum()); + AtomicAccess::release_store(&_seqnum, generation->seqnum()); break; } } diff --git a/src/hotspot/share/gc/z/zLiveMap.inline.hpp b/src/hotspot/share/gc/z/zLiveMap.inline.hpp index a7f836a85596e..e6176b928ff91 100644 --- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp +++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp @@ -31,7 +31,7 @@ #include "gc/z/zGeneration.inline.hpp" #include "gc/z/zMark.hpp" #include "gc/z/zUtils.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/debug.hpp" @@ -40,7 +40,7 @@ inline void ZLiveMap::reset() { } inline bool ZLiveMap::is_marked(ZGenerationId id) const { - return Atomic::load_acquire(&_seqnum) == ZGeneration::generation(id)->seqnum(); + return AtomicAccess::load_acquire(&_seqnum) == ZGeneration::generation(id)->seqnum(); } inline uint32_t ZLiveMap::live_objects() const { @@ -116,8 +116,8 @@ inline bool ZLiveMap::set(ZGenerationId id, BitMap::idx_t index, bool finalizabl } inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) { - Atomic::add(&_live_objects, objects); - Atomic::add(&_live_bytes, bytes); + AtomicAccess::add(&_live_objects, objects); + AtomicAccess::add(&_live_bytes, bytes); } inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const { diff --git a/src/hotspot/share/gc/z/zLock.inline.hpp b/src/hotspot/share/gc/z/zLock.inline.hpp index a45f789742400..edf59be5b4c87 100644 --- a/src/hotspot/share/gc/z/zLock.inline.hpp +++ b/src/hotspot/share/gc/z/zLock.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
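The ZLiveMap::reset loop above uses a sentinel sequence number so that exactly one thread performs the reset while the rest busy-wait, and a release store so that the reset marking state is visible before the new seqnum is. A standalone sketch of that claim-and-publish pattern (kInitializing and the spin loop are illustrative; the real code also backs off with SpinYield):

#include <atomic>
#include <cstdint>

static const uint32_t kInitializing = UINT32_MAX;   // Sentinel seqnum

inline void reset_once(std::atomic<uint32_t>* seqnum,
                       uint32_t current_seqnum,
                       void (*do_reset)()) {
  for (uint32_t observed = seqnum->load(std::memory_order_acquire);
       observed != current_seqnum;
       observed = seqnum->load(std::memory_order_acquire)) {
    if (observed == kInitializing) {
      continue;                    // Another thread is resetting; keep spinning
    }
    if (seqnum->compare_exchange_strong(observed, kInitializing,
                                        std::memory_order_acq_rel)) {
      do_reset();                  // This thread won the claim
      // Release: everything written by do_reset() happens-before any thread
      // that acquire-loads the up-to-date seqnum below.
      seqnum->store(current_seqnum, std::memory_order_release);
      return;
    }
  }
}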
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/z/zLock.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/os.inline.hpp" #include "utilities/debug.hpp" @@ -50,11 +50,11 @@ inline ZReentrantLock::ZReentrantLock() inline void ZReentrantLock::lock() { Thread* const thread = Thread::current(); - Thread* const owner = Atomic::load(&_owner); + Thread* const owner = AtomicAccess::load(&_owner); if (owner != thread) { _lock.lock(); - Atomic::store(&_owner, thread); + AtomicAccess::store(&_owner, thread); } _count++; @@ -67,14 +67,14 @@ inline void ZReentrantLock::unlock() { _count--; if (_count == 0) { - Atomic::store(&_owner, (Thread*)nullptr); + AtomicAccess::store(&_owner, (Thread*)nullptr); _lock.unlock(); } } inline bool ZReentrantLock::is_owned() const { Thread* const thread = Thread::current(); - Thread* const owner = Atomic::load(&_owner); + Thread* const owner = AtomicAccess::load(&_owner); return owner == thread; } diff --git a/src/hotspot/share/gc/z/zMappedCache.cpp b/src/hotspot/share/gc/z/zMappedCache.cpp index 394c5649c1470..a9336fddafd2b 100644 --- a/src/hotspot/share/gc/z/zMappedCache.cpp +++ b/src/hotspot/share/gc/z/zMappedCache.cpp @@ -216,7 +216,7 @@ void ZMappedCache::Tree::replace(TreeNode* old_node, TreeNode* new_node, const T } size_t ZMappedCache::Tree::size_atomic() const { - return Atomic::load(&_num_nodes); + return AtomicAccess::load(&_num_nodes); } const ZMappedCache::TreeNode* ZMappedCache::Tree::left_most() const { diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp index 482b4ddd75f21..cdd4ac1617935 100644 --- a/src/hotspot/share/gc/z/zMark.cpp +++ b/src/hotspot/share/gc/z/zMark.cpp @@ -57,7 +57,7 @@ #include "memory/iterator.inline.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/handshake.hpp" #include "runtime/javaThread.hpp" @@ -595,7 +595,7 @@ bool ZMark::flush() { } bool ZMark::try_terminate_flush() { - Atomic::inc(&_work_nterminateflush); + AtomicAccess::inc(&_work_nterminateflush); _terminate.set_resurrected(false); if (ZVerifyMarking) { @@ -611,12 +611,12 @@ bool ZMark::try_proactive_flush() { return false; } - if (Atomic::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax) { + if (AtomicAccess::load(&_work_nproactiveflush) == ZMarkProactiveFlushMax) { // Limit reached or we're trying to terminate return false; } - Atomic::inc(&_work_nproactiveflush); + AtomicAccess::inc(&_work_nproactiveflush); SuspendibleThreadSetLeaver sts_leaver; return flush(); diff --git a/src/hotspot/share/gc/z/zMarkStack.cpp b/src/hotspot/share/gc/z/zMarkStack.cpp index 692defc50f184..97c2564b014c8 100644 --- a/src/hotspot/share/gc/z/zMarkStack.cpp +++ b/src/hotspot/share/gc/z/zMarkStack.cpp @@ -25,7 +25,7 @@ #include "gc/z/zMarkStack.inline.hpp" #include "gc/z/zMarkTerminate.inline.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "utilities/debug.hpp" #include "utilities/powerOfTwo.hpp" @@ -72,12 +72,12 @@ ZMarkStackList::ZMarkStackList() _length() {} bool ZMarkStackList::is_empty() const { - return Atomic::load(&_head) == nullptr; + return AtomicAccess::load(&_head) == nullptr; } void ZMarkStackList::push(ZMarkStack* stack) { ZMarkStackListNode* const node = new 
ZMarkStackListNode(stack); - ZMarkStackListNode* head = Atomic::load(&_head); + ZMarkStackListNode* head = AtomicAccess::load(&_head); for (;;) { node->set_next(head); // Between reading the head and the linearizing CAS that pushes @@ -87,13 +87,13 @@ void ZMarkStackList::push(ZMarkStack* stack) { // situation and run this loop one more time, we would end up // having the same side effects: set the next pointer to the same // head again, and CAS the head link. - ZMarkStackListNode* prev = Atomic::cmpxchg(&_head, head, node, memory_order_release); + ZMarkStackListNode* prev = AtomicAccess::cmpxchg(&_head, head, node, memory_order_release); if (prev == head) { // Success // Bookkeep the population count - Atomic::inc(&_length, memory_order_relaxed); + AtomicAccess::inc(&_length, memory_order_relaxed); return; } @@ -105,7 +105,7 @@ void ZMarkStackList::push(ZMarkStack* stack) { ZMarkStack* ZMarkStackList::pop(ZMarkingSMR* marking_smr) { ZMarkStackListNode* volatile* const hazard_ptr = marking_smr->hazard_ptr(); - ZMarkStackListNode* head = Atomic::load(&_head); + ZMarkStackListNode* head = AtomicAccess::load(&_head); for (;;) { if (head == nullptr) { // Stack is empty @@ -115,7 +115,7 @@ ZMarkStack* ZMarkStackList::pop(ZMarkingSMR* marking_smr) { // Establish what the head is and publish a hazard pointer denoting // that the head is not safe to concurrently free while we are in the // middle of popping it and finding out that we lost the race. - Atomic::store(hazard_ptr, head); + AtomicAccess::store(hazard_ptr, head); // A full fence is needed to ensure the store and subsequent load do // not reorder. If they did reorder, the second head load could happen @@ -127,7 +127,7 @@ ZMarkStack* ZMarkStackList::pop(ZMarkingSMR* marking_smr) { // the next pointer load below observes the next pointer published // with the releasing CAS for the push operation that published the // marking stack. - ZMarkStackListNode* const head_after_publish = Atomic::load_acquire(&_head); + ZMarkStackListNode* const head_after_publish = AtomicAccess::load_acquire(&_head); if (head_after_publish != head) { // Race during hazard pointer publishing head = head_after_publish; @@ -141,7 +141,7 @@ ZMarkStack* ZMarkStackList::pop(ZMarkingSMR* marking_smr) { // Popping entries from the list does not require any particular memory // ordering. - ZMarkStackListNode* const prev = Atomic::cmpxchg(&_head, head, next, memory_order_relaxed); + ZMarkStackListNode* const prev = AtomicAccess::cmpxchg(&_head, head, next, memory_order_relaxed); if (prev == head) { // Success @@ -149,10 +149,10 @@ ZMarkStack* ZMarkStackList::pop(ZMarkingSMR* marking_smr) { // The ABA hazard is gone after the CAS. We use release_store to ensure // that the relinquishing of the hazard pointer becomes observable after // the unlinking CAS. - Atomic::release_store(hazard_ptr, (ZMarkStackListNode*)nullptr); + AtomicAccess::release_store(hazard_ptr, (ZMarkStackListNode*)nullptr); // Perform bookkeeping of the population count. - Atomic::dec(&_length, memory_order_relaxed); + AtomicAccess::dec(&_length, memory_order_relaxed); ZMarkStack* result = head->stack(); @@ -167,7 +167,7 @@ ZMarkStack* ZMarkStackList::pop(ZMarkingSMR* marking_smr) { } size_t ZMarkStackList::length() const { - const ssize_t result = Atomic::load(&_length); + const ssize_t result = AtomicAccess::load(&_length); if (result < 0) { return 0; @@ -221,7 +221,7 @@ bool ZMarkStripeSet::try_set_nstripes(size_t old_nstripes, size_t new_nstripes) // Mutators may read these values concurrently. 
It doesn't matter // if they see the old or new values. - if (Atomic::cmpxchg(&_nstripes_mask, old_nstripes_mask, new_nstripes_mask) == old_nstripes_mask) { + if (AtomicAccess::cmpxchg(&_nstripes_mask, old_nstripes_mask, new_nstripes_mask) == old_nstripes_mask) { log_debug(gc, marking)("Using %zu mark stripes", new_nstripes); return true; } @@ -230,7 +230,7 @@ bool ZMarkStripeSet::try_set_nstripes(size_t old_nstripes, size_t new_nstripes) } size_t ZMarkStripeSet::nstripes() const { - return Atomic::load(&_nstripes_mask) + 1; + return AtomicAccess::load(&_nstripes_mask) + 1; } bool ZMarkStripeSet::is_empty() const { @@ -258,7 +258,7 @@ bool ZMarkStripeSet::is_crowded() const { } ZMarkStripe* ZMarkStripeSet::stripe_for_worker(uint nworkers, uint worker_id) { - const size_t mask = Atomic::load(&_nstripes_mask); + const size_t mask = AtomicAccess::load(&_nstripes_mask); const size_t nstripes = mask + 1; const size_t spillover_limit = (nworkers / nstripes) * nstripes; diff --git a/src/hotspot/share/gc/z/zMarkStack.inline.hpp b/src/hotspot/share/gc/z/zMarkStack.inline.hpp index ed3b167762cde..28f6482aaaa7d 100644 --- a/src/hotspot/share/gc/z/zMarkStack.inline.hpp +++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp @@ -92,7 +92,7 @@ inline ZMarkStripe* ZMarkStripeSet::stripe_next(ZMarkStripe* stripe) { } inline ZMarkStripe* ZMarkStripeSet::stripe_for_addr(uintptr_t addr) { - const size_t index = (addr >> ZMarkStripeShift) & Atomic::load(&_nstripes_mask); + const size_t index = (addr >> ZMarkStripeShift) & AtomicAccess::load(&_nstripes_mask); assert(index < ZMarkStripesMax, "Invalid index"); return &_stripes[index]; } diff --git a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp index cdd9e7dce2525..575044e3a3945 100644 --- a/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp +++ b/src/hotspot/share/gc/z/zMarkTerminate.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
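ZMarkStripeSet above stores nstripes - 1 rather than the stripe count, so the count is always a power of two, selecting a stripe for an address is a shift plus one AND, and resizing is a single relaxed cmpxchg that racing mutators may observe either before or after. A small sketch of that representation (kStripeShift is an assumed stand-in for ZMarkStripeShift):

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>

static const uintptr_t kStripeShift = 21;              // Illustrative shift value
static std::atomic<std::size_t> g_nstripes_mask{0};    // nstripes - 1

inline std::size_t stripe_index_for_addr(uintptr_t addr) {
  const std::size_t mask = g_nstripes_mask.load(std::memory_order_relaxed);
  return (addr >> kStripeShift) & mask;                 // Always < nstripes
}

inline bool try_set_nstripes(std::size_t old_nstripes, std::size_t new_nstripes) {
  assert(new_nstripes != 0 && (new_nstripes & (new_nstripes - 1)) == 0);
  std::size_t expected = old_nstripes - 1;
  // Relaxed is fine: readers may use either the old or the new mask.
  return g_nstripes_mask.compare_exchange_strong(expected, new_nstripes - 1,
                                                 std::memory_order_relaxed);
}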
* * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc/z/zLock.inline.hpp" #include "gc/z/zMarkStack.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/thread.inline.hpp" @@ -42,8 +42,8 @@ inline ZMarkTerminate::ZMarkTerminate() _lock() {} inline void ZMarkTerminate::reset(uint nworkers) { - Atomic::store(&_nworkers, nworkers); - Atomic::store(&_nworking, nworkers); + AtomicAccess::store(&_nworkers, nworkers); + AtomicAccess::store(&_nworking, nworkers); _nawakening = 0; } @@ -51,7 +51,7 @@ inline void ZMarkTerminate::leave() { SuspendibleThreadSetLeaver sts_leaver; ZLocker locker(&_lock); - Atomic::store(&_nworking, _nworking - 1); + AtomicAccess::store(&_nworking, _nworking - 1); if (_nworking == 0) { // Last thread leaving; notify waiters _lock.notify_all(); @@ -69,7 +69,7 @@ inline bool ZMarkTerminate::try_terminate(ZMarkStripeSet* stripes, size_t used_n SuspendibleThreadSetLeaver sts_leaver; ZLocker locker(&_lock); - Atomic::store(&_nworking, _nworking - 1); + AtomicAccess::store(&_nworking, _nworking - 1); if (_nworking == 0) { // Last thread entering termination: success _lock.notify_all(); @@ -84,7 +84,7 @@ inline bool ZMarkTerminate::try_terminate(ZMarkStripeSet* stripes, size_t used_n // We either got notification about more work // or got a spurious wakeup; don't terminate if (_nawakening > 0) { - Atomic::store(&_nawakening, _nawakening - 1); + AtomicAccess::store(&_nawakening, _nawakening - 1); } if (_nworking == 0) { @@ -92,15 +92,15 @@ inline bool ZMarkTerminate::try_terminate(ZMarkStripeSet* stripes, size_t used_n return true; } - Atomic::store(&_nworking, _nworking + 1); + AtomicAccess::store(&_nworking, _nworking + 1); return false; } inline void ZMarkTerminate::wake_up() { - uint nworking = Atomic::load(&_nworking); - uint nawakening = Atomic::load(&_nawakening); - if (nworking + nawakening == Atomic::load(&_nworkers)) { + uint nworking = AtomicAccess::load(&_nworking); + uint nawakening = AtomicAccess::load(&_nawakening); + if (nworking + nawakening == AtomicAccess::load(&_nworkers)) { // Everyone is working or about to return; } @@ -113,22 +113,22 @@ inline void ZMarkTerminate::wake_up() { ZLocker locker(&_lock); if (_nworking + _nawakening != _nworkers) { // Everyone is not working - Atomic::store(&_nawakening, _nawakening + 1); + AtomicAccess::store(&_nawakening, _nawakening + 1); _lock.notify(); } } inline bool ZMarkTerminate::saturated() const { - uint nworking = Atomic::load(&_nworking); - uint nawakening = Atomic::load(&_nawakening); + uint nworking = AtomicAccess::load(&_nworking); + uint nawakening = AtomicAccess::load(&_nawakening); - return nworking + nawakening == Atomic::load(&_nworkers); + return nworking + nawakening == AtomicAccess::load(&_nworkers); } inline void ZMarkTerminate::set_resurrected(bool value) { // Update resurrected if it changed if (resurrected() != value) { - Atomic::store(&_resurrected, value); + AtomicAccess::store(&_resurrected, value); if (value) { log_debug(gc, marking)("Resurrection broke termination"); } else { @@ -138,7 +138,7 @@ inline void ZMarkTerminate::set_resurrected(bool value) { } inline bool ZMarkTerminate::resurrected() const { - return Atomic::load(&_resurrected); + return AtomicAccess::load(&_resurrected); } #endif // SHARE_GC_Z_ZMARKTERMINATE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zMarkingSMR.cpp b/src/hotspot/share/gc/z/zMarkingSMR.cpp index 
2335d009c74ff..0f45f17466777 100644 --- a/src/hotspot/share/gc/z/zMarkingSMR.cpp +++ b/src/hotspot/share/gc/z/zMarkingSMR.cpp @@ -24,7 +24,7 @@ #include "gc/z/zMarkingSMR.hpp" #include "gc/z/zMarkStack.inline.hpp" #include "gc/z/zValue.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" ZMarkingSMR::ZMarkingSMR() : _worker_states(), @@ -70,7 +70,7 @@ void ZMarkingSMR::free_node(ZMarkStackListNode* node) { ZArray* const scanned_hazards = &local_state->_scanned_hazards; for (ZWorkerState* remote_state; iter.next(&remote_state);) { - ZMarkStackListNode* const hazard = Atomic::load(&remote_state->_hazard_ptr); + ZMarkStackListNode* const hazard = AtomicAccess::load(&remote_state->_hazard_ptr); if (hazard != nullptr) { scanned_hazards->append(hazard); diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp index 3f65d2eea973e..a5d94fc1e5ebd 100644 --- a/src/hotspot/share/gc/z/zNMethod.cpp +++ b/src/hotspot/share/gc/z/zNMethod.cpp @@ -48,7 +48,7 @@ #include "memory/universe.hpp" #include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "utilities/debug.hpp" @@ -146,7 +146,7 @@ void ZNMethod::log_register(const nmethod* nm) { oop* const begin = nm->oops_begin(); oop* const end = nm->oops_end(); for (oop* p = begin; p < end; p++) { - const oop o = Atomic::load(p); // C1 PatchingStub may replace it concurrently. + const oop o = AtomicAccess::load(p); // C1 PatchingStub may replace it concurrently. const char* const external_name = (o == nullptr) ? "N/A" : o->klass()->external_name(); log_oops.print(" Oop: " PTR_FORMAT " (%s)", p2i(o), external_name); diff --git a/src/hotspot/share/gc/z/zNMethodTableIteration.cpp b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp index f34f520875237..bdd4270ddac3f 100644 --- a/src/hotspot/share/gc/z/zNMethodTableIteration.cpp +++ b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp @@ -24,7 +24,7 @@ #include "gc/z/zNMethodTableEntry.hpp" #include "gc/z/zNMethodTableIteration.hpp" #include "memory/iterator.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -57,7 +57,7 @@ void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) { // Claim table partition. Each partition is currently sized to span // two cache lines. This number is just a guess, but seems to work well. 
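The nmethods_do claim that follows hands each worker a fixed-size partition of the table via fetch_then_add, clamping both ends so late claims come back empty instead of overrunning. A standalone sketch of that partitioning with std::atomic (TableIteration and claim_partition are illustrative names):

#include <algorithm>
#include <atomic>
#include <cstddef>

struct TableIteration {
  std::atomic<std::size_t> claimed{0};
  std::size_t              size = 0;

  // Returns false once the table is exhausted.
  bool claim_partition(std::size_t partition_size,
                       std::size_t* start, std::size_t* end) {
    const std::size_t begin =
        std::min(claimed.fetch_add(partition_size, std::memory_order_relaxed),
                 size);
    const std::size_t limit = std::min(begin + partition_size, size);
    if (begin == limit) {
      return false;                    // End of table
    }
    *start = begin;
    *end   = limit;
    return true;
  }
};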
const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry); - const size_t partition_start = MIN2(Atomic::fetch_then_add(&_claimed, partition_size), _size); + const size_t partition_start = MIN2(AtomicAccess::fetch_then_add(&_claimed, partition_size), _size); const size_t partition_end = MIN2(partition_start + partition_size, _size); if (partition_start == partition_end) { // End of table diff --git a/src/hotspot/share/gc/z/zObjectAllocator.cpp b/src/hotspot/share/gc/z/zObjectAllocator.cpp index 63e7f2b4ae99f..b92c97d1ba726 100644 --- a/src/hotspot/share/gc/z/zObjectAllocator.cpp +++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp @@ -31,7 +31,7 @@ #include "gc/z/zPageAge.inline.hpp" #include "gc/z/zPageType.hpp" #include "gc/z/zValue.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.hpp" #include "utilities/align.hpp" @@ -66,7 +66,7 @@ zaddress ZObjectAllocator::PerAge::alloc_object_in_shared_page(ZPage** shared_pa size_t size, ZAllocationFlags flags) { zaddress addr = zaddress::null; - ZPage* page = Atomic::load_acquire(shared_page); + ZPage* page = AtomicAccess::load_acquire(shared_page); if (page != nullptr) { addr = page->alloc_object_atomic(size); @@ -81,7 +81,7 @@ zaddress ZObjectAllocator::PerAge::alloc_object_in_shared_page(ZPage** shared_pa retry: // Install new page - ZPage* const prev_page = Atomic::cmpxchg(shared_page, page, new_page); + ZPage* const prev_page = AtomicAccess::cmpxchg(shared_page, page, new_page); if (prev_page != page) { if (prev_page == nullptr) { // Previous page was retired, retry installing the new page @@ -113,7 +113,7 @@ zaddress ZObjectAllocator::PerAge::alloc_object_in_medium_page(size_t size, ZAllocationFlags flags) { zaddress addr = zaddress::null; ZPage** shared_medium_page = _shared_medium_page.addr(); - ZPage* page = Atomic::load_acquire(shared_medium_page); + ZPage* page = AtomicAccess::load_acquire(shared_medium_page); if (page != nullptr) { addr = page->alloc_object_atomic(size); @@ -227,7 +227,7 @@ size_t ZObjectAllocator::fast_available(ZPageAge age) const { assert(Thread::current()->is_Java_thread(), "Should be a Java thread"); ZPage* const* const shared_addr = allocator(age)->shared_small_page_addr(); - const ZPage* const page = Atomic::load_acquire(shared_addr); + const ZPage* const page = AtomicAccess::load_acquire(shared_addr); if (page != nullptr) { return page->remaining(); } diff --git a/src/hotspot/share/gc/z/zPage.cpp b/src/hotspot/share/gc/z/zPage.cpp index 80e10dc75ec64..9f4654a655f76 100644 --- a/src/hotspot/share/gc/z/zPage.cpp +++ b/src/hotspot/share/gc/z/zPage.cpp @@ -79,8 +79,8 @@ const ZGeneration* ZPage::generation() const { } void ZPage::reset_seqnum() { - Atomic::store(&_seqnum, generation()->seqnum()); - Atomic::store(&_seqnum_other, ZGeneration::generation(_generation_id == ZGenerationId::young ? ZGenerationId::old : ZGenerationId::young)->seqnum()); + AtomicAccess::store(&_seqnum, generation()->seqnum()); + AtomicAccess::store(&_seqnum_other, ZGeneration::generation(_generation_id == ZGenerationId::young ? 
ZGenerationId::old : ZGenerationId::young)->seqnum()); } void ZPage::remset_alloc() { diff --git a/src/hotspot/share/gc/z/zPage.inline.hpp b/src/hotspot/share/gc/z/zPage.inline.hpp index f9a0dbf328d26..970ee6600a511 100644 --- a/src/hotspot/share/gc/z/zPage.inline.hpp +++ b/src/hotspot/share/gc/z/zPage.inline.hpp @@ -33,7 +33,7 @@ #include "gc/z/zRememberedSet.inline.hpp" #include "gc/z/zVirtualMemory.inline.hpp" #include "logging/logStream.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" @@ -468,7 +468,7 @@ inline zaddress ZPage::alloc_object_atomic(size_t size) { return zaddress::null; } - const zoffset_end prev_top = Atomic::cmpxchg(&_top, addr, new_top); + const zoffset_end prev_top = AtomicAccess::cmpxchg(&_top, addr, new_top); if (prev_top == addr) { // Success return ZOffset::address(to_zoffset(addr)); @@ -512,7 +512,7 @@ inline bool ZPage::undo_alloc_object_atomic(zaddress addr, size_t size) { return false; } - const zoffset_end prev_top = Atomic::cmpxchg(&_top, old_top, new_top); + const zoffset_end prev_top = AtomicAccess::cmpxchg(&_top, old_top, new_top); if (prev_top == old_top) { // Success return true; diff --git a/src/hotspot/share/gc/z/zPageAllocator.cpp b/src/hotspot/share/gc/z/zPageAllocator.cpp index 9f484394521a5..9f762dd403ec2 100644 --- a/src/hotspot/share/gc/z/zPageAllocator.cpp +++ b/src/hotspot/share/gc/z/zPageAllocator.cpp @@ -650,7 +650,7 @@ size_t ZPartition::increase_capacity(size_t size) { if (increased > 0) { // Update atomically since we have concurrent readers - Atomic::add(&_capacity, increased); + AtomicAccess::add(&_capacity, increased); _uncommitter.cancel_uncommit_cycle(); } @@ -660,12 +660,12 @@ size_t ZPartition::increase_capacity(size_t size) { void ZPartition::decrease_capacity(size_t size, bool set_max_capacity) { // Update capacity atomically since we have concurrent readers - Atomic::sub(&_capacity, size); + AtomicAccess::sub(&_capacity, size); // Adjust current max capacity to avoid further attempts to increase capacity if (set_max_capacity) { const size_t current_max_capacity_before = _current_max_capacity; - Atomic::store(&_current_max_capacity, _capacity); + AtomicAccess::store(&_current_max_capacity, _capacity); log_debug_p(gc)("Forced to lower max partition (%u) capacity from " "%zuM(%.0f%%) to %zuM(%.0f%%)", @@ -935,7 +935,7 @@ class ZPreTouchTask : public ZTask { for (;;) { // Claim an offset for this thread - const uintptr_t claimed = Atomic::fetch_then_add(&_current, size); + const uintptr_t claimed = AtomicAccess::fetch_then_add(&_current, size); if (claimed >= _end) { // Done break; @@ -1280,7 +1280,7 @@ size_t ZPageAllocator::max_capacity() const { size_t ZPageAllocator::soft_max_capacity() const { const size_t current_max_capacity = ZPageAllocator::current_max_capacity(); - const size_t soft_max_heapsize = Atomic::load(&SoftMaxHeapSize); + const size_t soft_max_heapsize = AtomicAccess::load(&SoftMaxHeapSize); return MIN2(soft_max_heapsize, current_max_capacity); } @@ -1289,7 +1289,7 @@ size_t ZPageAllocator::current_max_capacity() const { ZPartitionConstIterator iter = partition_iterator(); for (const ZPartition* partition; iter.next(&partition);) { - current_max_capacity += Atomic::load(&partition->_current_max_capacity); + current_max_capacity += AtomicAccess::load(&partition->_current_max_capacity); } return current_max_capacity; @@ -1300,18 +1300,18 @@ size_t ZPageAllocator::capacity() const { ZPartitionConstIterator 
iter = partition_iterator(); for (const ZPartition* partition; iter.next(&partition);) { - capacity += Atomic::load(&partition->_capacity); + capacity += AtomicAccess::load(&partition->_capacity); } return capacity; } size_t ZPageAllocator::used() const { - return Atomic::load(&_used); + return AtomicAccess::load(&_used); } size_t ZPageAllocator::used_generation(ZGenerationId id) const { - return Atomic::load(&_used_generations[(int)id]); + return AtomicAccess::load(&_used_generations[(int)id]); } size_t ZPageAllocator::unused() const { @@ -1321,8 +1321,8 @@ size_t ZPageAllocator::unused() const { ZPartitionConstIterator iter = partition_iterator(); for (const ZPartition* partition; iter.next(&partition);) { - capacity += (ssize_t)Atomic::load(&partition->_capacity); - claimed += (ssize_t)Atomic::load(&partition->_claimed); + capacity += (ssize_t)AtomicAccess::load(&partition->_capacity); + claimed += (ssize_t)AtomicAccess::load(&partition->_claimed); } const ssize_t unused = capacity - used - claimed; @@ -1376,12 +1376,12 @@ ZPageAllocatorStats ZPageAllocator::update_and_stats(ZGeneration* generation) { void ZPageAllocator::increase_used_generation(ZGenerationId id, size_t size) { // Update atomically since we have concurrent readers and writers - Atomic::add(&_used_generations[(int)id], size, memory_order_relaxed); + AtomicAccess::add(&_used_generations[(int)id], size, memory_order_relaxed); } void ZPageAllocator::decrease_used_generation(ZGenerationId id, size_t size) { // Update atomically since we have concurrent readers and writers - Atomic::sub(&_used_generations[(int)id], size, memory_order_relaxed); + AtomicAccess::sub(&_used_generations[(int)id], size, memory_order_relaxed); } void ZPageAllocator::promote_used(const ZPage* from, const ZPage* to) { @@ -2229,7 +2229,7 @@ size_t ZPageAllocator::sum_available() const { void ZPageAllocator::increase_used(size_t size) { // Update atomically since we have concurrent readers - const size_t used = Atomic::add(&_used, size); + const size_t used = AtomicAccess::add(&_used, size); // Update used high for (auto& stats : _collection_stats) { @@ -2241,7 +2241,7 @@ void ZPageAllocator::increase_used(size_t size) { void ZPageAllocator::decrease_used(size_t size) { // Update atomically since we have concurrent readers - const size_t used = Atomic::sub(&_used, size); + const size_t used = AtomicAccess::sub(&_used, size); // Update used low for (auto& stats : _collection_stats) { diff --git a/src/hotspot/share/gc/z/zReferenceProcessor.cpp b/src/hotspot/share/gc/z/zReferenceProcessor.cpp index 1af4deaec0339..229365a2e7e13 100644 --- a/src/hotspot/share/gc/z/zReferenceProcessor.cpp +++ b/src/hotspot/share/gc/z/zReferenceProcessor.cpp @@ -35,7 +35,7 @@ #include "gc/z/zValue.inline.hpp" #include "memory/universe.hpp" #include "oops/access.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" @@ -316,7 +316,7 @@ void ZReferenceProcessor::process_worker_discovered_list(zaddress discovered_lis // Anything kept on the list? 
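The reference-processor hunks here use xchg for two related tricks: swapping nullptr into a per-worker list head detaches the whole list and gives the caller exclusive ownership of it, and swapping a privately built chain into the shared pending list returns the old head so it can be linked behind the chain's tail. A minimal sketch with std::atomic, assuming (as in ZGC) that consumers of the shared list are coordinated by the GC phases and do not race with the prepend:

#include <atomic>

struct Node {
  Node* next;
};

// Take ownership of everything currently on the list.
inline Node* detach_all(std::atomic<Node*>* list) {
  return list->exchange(nullptr, std::memory_order_acq_rel);
}

// Prepend a chain [head .. tail] and concatenate the previous contents.
// Note the brief window where tail->next is not yet linked; the surrounding
// protocol must ensure no consumer walks the list during that window.
inline void prepend_chain(std::atomic<Node*>* list, Node* head, Node* tail) {
  Node* const old_head = list->exchange(head, std::memory_order_acq_rel);
  tail->next = old_head;
}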
if (!is_null(keep_head)) { - const zaddress old_pending_list = Atomic::xchg(_pending_list.addr(), keep_head); + const zaddress old_pending_list = AtomicAccess::xchg(_pending_list.addr(), keep_head); // Concatenate the old list reference_set_discovered(keep_tail, old_pending_list); @@ -335,7 +335,7 @@ void ZReferenceProcessor::work() { ZPerWorkerIterator iter(&_discovered_list); for (zaddress* start; iter.next(&start);) { - const zaddress discovered_list = Atomic::xchg(start, zaddress::null); + const zaddress discovered_list = AtomicAccess::xchg(start, zaddress::null); if (discovered_list != zaddress::null) { // Process discovered references diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp index 556f413348b03..3726534588b86 100644 --- a/src/hotspot/share/gc/z/zRelocate.cpp +++ b/src/hotspot/share/gc/z/zRelocate.cpp @@ -48,7 +48,7 @@ #include "gc/z/zVerify.hpp" #include "gc/z/zWorkers.hpp" #include "prims/jvmtiTagMap.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall"); @@ -64,16 +64,16 @@ ZRelocateQueue::ZRelocateQueue() _needs_attention(0) {} bool ZRelocateQueue::needs_attention() const { - return Atomic::load(&_needs_attention) != 0; + return AtomicAccess::load(&_needs_attention) != 0; } void ZRelocateQueue::inc_needs_attention() { - const int needs_attention = Atomic::add(&_needs_attention, 1); + const int needs_attention = AtomicAccess::add(&_needs_attention, 1); assert(needs_attention == 1 || needs_attention == 2, "Invalid state"); } void ZRelocateQueue::dec_needs_attention() { - const int needs_attention = Atomic::sub(&_needs_attention, 1); + const int needs_attention = AtomicAccess::sub(&_needs_attention, 1); assert(needs_attention == 0 || needs_attention == 1, "Invalid state"); } @@ -83,12 +83,12 @@ void ZRelocateQueue::activate(uint nworkers) { } void ZRelocateQueue::deactivate() { - Atomic::store(&_is_active, false); + AtomicAccess::store(&_is_active, false); clear(); } bool ZRelocateQueue::is_active() const { - return Atomic::load(&_is_active); + return AtomicAccess::load(&_is_active); } void ZRelocateQueue::join(uint nworkers) { @@ -463,7 +463,7 @@ class ZRelocateSmallAllocator { ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) { ZPage* const page = alloc_page(forwarding); if (page == nullptr) { - Atomic::inc(&_in_place_count); + AtomicAccess::inc(&_in_place_count); } if (target != nullptr) { @@ -539,7 +539,7 @@ class ZRelocateMediumAllocator { ZPage* const to_page = alloc_page(forwarding); _shared_targets->set(partition_id, to_age, to_page); if (to_page == nullptr) { - Atomic::inc(&_in_place_count); + AtomicAccess::inc(&_in_place_count); _in_place = true; } @@ -741,7 +741,7 @@ class ZRelocateWork : public StackObj { } static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) { - const zpointer ptr = Atomic::load(p); + const zpointer ptr = AtomicAccess::load(p); assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr)); @@ -1211,7 +1211,7 @@ class ZRelocateTask : public ZRestartableTask { } if (ZNUMA::is_enabled()) { - Atomic::add(&_numa_local_forwardings, numa_local_forwardings_worker, memory_order_relaxed); + AtomicAccess::add(&_numa_local_forwardings, numa_local_forwardings_worker, memory_order_relaxed); } _queue->leave(); @@ -1223,7 +1223,7 @@ class ZRelocateTask : public ZRestartableTask { }; static void 
remap_and_maybe_add_remset(volatile zpointer* p) { - const zpointer ptr = Atomic::load(p); + const zpointer ptr = AtomicAccess::load(p); if (ZPointer::is_store_good(ptr)) { // Already has a remset entry diff --git a/src/hotspot/share/gc/z/zRelocationSet.cpp b/src/hotspot/share/gc/z/zRelocationSet.cpp index 8cc7bceb8e849..fb86593469014 100644 --- a/src/hotspot/share/gc/z/zRelocationSet.cpp +++ b/src/hotspot/share/gc/z/zRelocationSet.cpp @@ -33,7 +33,7 @@ #include "gc/z/zStat.hpp" #include "gc/z/zTask.hpp" #include "gc/z/zWorkers.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" class ZRelocationSetInstallTask : public ZTask { diff --git a/src/hotspot/share/gc/z/zRemembered.cpp b/src/hotspot/share/gc/z/zRemembered.cpp index 815b914dd8db0..31619dc4513fc 100644 --- a/src/hotspot/share/gc/z/zRemembered.cpp +++ b/src/hotspot/share/gc/z/zRemembered.cpp @@ -403,7 +403,7 @@ ZRemsetTableIterator::ZRemsetTableIterator(ZRemembered* remembered, bool previou // This iterator uses the "found old" optimization. bool ZRemsetTableIterator::next(ZRemsetTableEntry* entry_addr) { - BitMap::idx_t prev = Atomic::load(&_claimed); + BitMap::idx_t prev = AtomicAccess::load(&_claimed); for (;;) { if (prev == _bm->size()) { @@ -412,11 +412,11 @@ bool ZRemsetTableIterator::next(ZRemsetTableEntry* entry_addr) { const BitMap::idx_t page_index = _bm->find_first_set_bit(_claimed); if (page_index == _bm->size()) { - Atomic::cmpxchg(&_claimed, prev, page_index, memory_order_relaxed); + AtomicAccess::cmpxchg(&_claimed, prev, page_index, memory_order_relaxed); return false; } - const BitMap::idx_t res = Atomic::cmpxchg(&_claimed, prev, page_index + 1, memory_order_relaxed); + const BitMap::idx_t res = AtomicAccess::cmpxchg(&_claimed, prev, page_index + 1, memory_order_relaxed); if (res != prev) { // Someone else claimed prev = res; diff --git a/src/hotspot/share/gc/z/zResurrection.cpp b/src/hotspot/share/gc/z/zResurrection.cpp index 4e9aa0448da3f..466f09ece4725 100644 --- a/src/hotspot/share/gc/z/zResurrection.cpp +++ b/src/hotspot/share/gc/z/zResurrection.cpp @@ -22,7 +22,7 @@ */ #include "gc/z/zResurrection.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/safepoint.hpp" #include "utilities/debug.hpp" @@ -37,5 +37,5 @@ void ZResurrection::unblock() { // No need for anything stronger than a relaxed store here. // The preceding handshake makes sure that all non-strong // oops have already been healed at this point. - Atomic::store(&_blocked, false); + AtomicAccess::store(&_blocked, false); } diff --git a/src/hotspot/share/gc/z/zResurrection.inline.hpp b/src/hotspot/share/gc/z/zResurrection.inline.hpp index 325f8a1acfa03..5cbb99be8d91f 100644 --- a/src/hotspot/share/gc/z/zResurrection.inline.hpp +++ b/src/hotspot/share/gc/z/zResurrection.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
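ZRemsetTableIterator::next above lets threads race to push a shared claim cursor past the next set bit in the found-old bitmap; whoever wins the cmpxchg owns that bit. A standalone sketch of the same claim loop, with find_next_set standing in for the real bitmap search:

#include <atomic>
#include <cstddef>
#include <vector>

inline std::size_t find_next_set(const std::vector<bool>& bits, std::size_t from) {
  for (std::size_t i = from; i < bits.size(); i++) {
    if (bits[i]) {
      return i;
    }
  }
  return bits.size();
}

// Returns true and stores the claimed bit index, or false when done.
inline bool claim_next(const std::vector<bool>& bits,
                       std::atomic<std::size_t>* claimed, std::size_t* out) {
  std::size_t prev = claimed->load(std::memory_order_relaxed);
  for (;;) {
    if (prev == bits.size()) {
      return false;                                 // Iteration finished
    }
    const std::size_t idx  = find_next_set(bits, prev);
    const std::size_t next = (idx == bits.size()) ? idx : idx + 1;
    if (claimed->compare_exchange_weak(prev, next, std::memory_order_relaxed)) {
      if (idx == bits.size()) {
        return false;
      }
      *out = idx;                                   // This thread claimed idx
      return true;
    }
    // Lost the race; prev was reloaded by compare_exchange_weak, retry
  }
}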
* * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,10 @@ #include "gc/z/zResurrection.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline bool ZResurrection::is_blocked() { - return Atomic::load(&_blocked); + return AtomicAccess::load(&_blocked); } #endif // SHARE_GC_Z_ZRESURRECTION_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zRootsIterator.cpp b/src/hotspot/share/gc/z/zRootsIterator.cpp index d5c413f593881..b8cb72057a86b 100644 --- a/src/hotspot/share/gc/z/zRootsIterator.cpp +++ b/src/hotspot/share/gc/z/zRootsIterator.cpp @@ -29,7 +29,7 @@ #include "gc/z/zStat.hpp" #include "memory/resourceArea.hpp" #include "prims/jvmtiTagMap.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/safepoint.hpp" #include "utilities/debug.hpp" @@ -91,10 +91,10 @@ class ZRootStatTimer { template template void ZParallelApply::apply(ClosureType* cl) { - if (!Atomic::load(&_completed)) { + if (!AtomicAccess::load(&_completed)) { _iter.apply(cl); - if (!Atomic::load(&_completed)) { - Atomic::store(&_completed, true); + if (!AtomicAccess::load(&_completed)) { + AtomicAccess::store(&_completed, true); } } } @@ -120,7 +120,7 @@ void ZCLDsIteratorAll::apply(CLDClosure* cl) { } uint ZJavaThreadsIterator::claim() { - return Atomic::fetch_then_add(&_claimed, 1u); + return AtomicAccess::fetch_then_add(&_claimed, 1u); } void ZJavaThreadsIterator::apply(ThreadClosure* cl) { diff --git a/src/hotspot/share/gc/z/zStat.cpp b/src/hotspot/share/gc/z/zStat.cpp index c6f23ab5b67e2..f703b3a17917b 100644 --- a/src/hotspot/share/gc/z/zStat.cpp +++ b/src/hotspot/share/gc/z/zStat.cpp @@ -38,7 +38,7 @@ #include "gc/z/zUtils.inline.hpp" #include "memory/metaspaceUtils.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "runtime/thread.hpp" #include "runtime/timer.hpp" @@ -445,9 +445,9 @@ ZStatSamplerData ZStatSampler::collect_and_reset() const { for (uint32_t i = 0; i < ncpus; i++) { ZStatSamplerData* const cpu_data = get_cpu_local(i); if (cpu_data->_nsamples > 0) { - const uint64_t nsamples = Atomic::xchg(&cpu_data->_nsamples, (uint64_t)0); - const uint64_t sum = Atomic::xchg(&cpu_data->_sum, (uint64_t)0); - const uint64_t max = Atomic::xchg(&cpu_data->_max, (uint64_t)0); + const uint64_t nsamples = AtomicAccess::xchg(&cpu_data->_nsamples, (uint64_t)0); + const uint64_t sum = AtomicAccess::xchg(&cpu_data->_sum, (uint64_t)0); + const uint64_t max = AtomicAccess::xchg(&cpu_data->_max, (uint64_t)0); all._nsamples += nsamples; all._sum += sum; if (all._max < max) { @@ -480,7 +480,7 @@ void ZStatCounter::sample_and_reset() const { const uint32_t ncpus = ZCPU::count(); for (uint32_t i = 0; i < ncpus; i++) { ZStatCounterData* const cpu_data = get_cpu_local(i); - counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0); + counter += AtomicAccess::xchg(&cpu_data->_counter, (uint64_t)0); } ZStatSample(_sampler, counter); @@ -502,7 +502,7 @@ ZStatCounterData ZStatUnsampledCounter::collect_and_reset() const { const uint32_t ncpus = ZCPU::count(); for (uint32_t i = 0; i < ncpus; i++) { ZStatCounterData* const cpu_data = get_cpu_local(i); - all._counter += Atomic::xchg(&cpu_data->_counter, (uint64_t)0); + all._counter += AtomicAccess::xchg(&cpu_data->_counter, (uint64_t)0); } return all; @@ -892,8 +892,8 @@ ZStatTimerWorker::ZStatTimerWorker(const ZStatPhase& phase) // void ZStatSample(const ZStatSampler& sampler, uint64_t 
value) { ZStatSamplerData* const cpu_data = sampler.get(); - Atomic::add(&cpu_data->_nsamples, 1u); - Atomic::add(&cpu_data->_sum, value); + AtomicAccess::add(&cpu_data->_nsamples, 1u); + AtomicAccess::add(&cpu_data->_sum, value); uint64_t max = cpu_data->_max; for (;;) { @@ -903,7 +903,7 @@ void ZStatSample(const ZStatSampler& sampler, uint64_t value) { } const uint64_t new_max = value; - const uint64_t prev_max = Atomic::cmpxchg(&cpu_data->_max, max, new_max); + const uint64_t prev_max = AtomicAccess::cmpxchg(&cpu_data->_max, max, new_max); if (prev_max == max) { // Success break; @@ -922,14 +922,14 @@ void ZStatDurationSample(const ZStatSampler& sampler, const Tickspan& duration) void ZStatInc(const ZStatCounter& counter, uint64_t increment) { ZStatCounterData* const cpu_data = counter.get(); - const uint64_t value = Atomic::add(&cpu_data->_counter, increment); + const uint64_t value = AtomicAccess::add(&cpu_data->_counter, increment); ZTracer::report_stat_counter(counter, increment, value); } void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) { ZStatCounterData* const cpu_data = counter.get(); - Atomic::add(&cpu_data->_counter, increment); + AtomicAccess::add(&cpu_data->_counter, increment); } // @@ -956,9 +956,9 @@ void ZStatMutatorAllocRate::update_sampling_granule() { } void ZStatMutatorAllocRate::sample_allocation(size_t allocation_bytes) { - const size_t allocated = Atomic::add(&_allocated_since_sample, allocation_bytes); + const size_t allocated = AtomicAccess::add(&_allocated_since_sample, allocation_bytes); - if (allocated < Atomic::load(&_sampling_granule)) { + if (allocated < AtomicAccess::load(&_sampling_granule)) { // No need for sampling yet return; } @@ -968,7 +968,7 @@ void ZStatMutatorAllocRate::sample_allocation(size_t allocation_bytes) { return; } - const size_t allocated_sample = Atomic::load(&_allocated_since_sample); + const size_t allocated_sample = AtomicAccess::load(&_allocated_since_sample); if (allocated_sample < _sampling_granule) { // Someone beat us to it @@ -985,7 +985,7 @@ void ZStatMutatorAllocRate::sample_allocation(size_t allocation_bytes) { return; } - Atomic::sub(&_allocated_since_sample, allocated_sample); + AtomicAccess::sub(&_allocated_since_sample, allocated_sample); _samples_time.add(elapsed); _samples_bytes.add(allocated_sample); diff --git a/src/hotspot/share/gc/z/zTLABUsage.cpp b/src/hotspot/share/gc/z/zTLABUsage.cpp index cd8141e47ffbb..c2e3db6bedfb2 100644 --- a/src/hotspot/share/gc/z/zTLABUsage.cpp +++ b/src/hotspot/share/gc/z/zTLABUsage.cpp @@ -23,24 +23,24 @@ #include "gc/z/zTLABUsage.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" ZTLABUsage::ZTLABUsage() : _used(0), _used_history() {} void ZTLABUsage::increase_used(size_t size) { - Atomic::add(&_used, size, memory_order_relaxed); + AtomicAccess::add(&_used, size, memory_order_relaxed); } void ZTLABUsage::decrease_used(size_t size) { precond(size <= _used); - Atomic::sub(&_used, size, memory_order_relaxed); + AtomicAccess::sub(&_used, size, memory_order_relaxed); } void ZTLABUsage::reset() { - const size_t used = Atomic::xchg(&_used, (size_t) 0); + const size_t used = AtomicAccess::xchg(&_used, (size_t) 0); // Avoid updates when nothing has been allocated since the last YC if (used == 0) { diff --git a/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp b/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp index 128d8f1bfef7e..4d932c8c4174d 100644 --- a/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp +++ 
b/src/hotspot/share/gc/z/zUncoloredRoot.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ template inline void ZUncoloredRoot::barrier(ObjectFunctionT function, zaddress_unsafe* p, uintptr_t color) { z_verify_safepoints_are_blocked(); - const zaddress_unsafe addr = Atomic::load(p); + const zaddress_unsafe addr = AtomicAccess::load(p); assert_is_valid(addr); // Nothing to do for nulls diff --git a/src/hotspot/share/gc/z/zUncommitter.cpp b/src/hotspot/share/gc/z/zUncommitter.cpp index 7e9cad5ab0a2e..96af67a77b539 100644 --- a/src/hotspot/share/gc/z/zUncommitter.cpp +++ b/src/hotspot/share/gc/z/zUncommitter.cpp @@ -400,7 +400,7 @@ size_t ZUncommitter::uncommit() { } // Record flushed memory as claimed and how much we've flushed for this partition - Atomic::add(&_partition->_claimed, flushed); + AtomicAccess::add(&_partition->_claimed, flushed); } // Unmap and uncommit flushed memory @@ -416,7 +416,7 @@ size_t ZUncommitter::uncommit() { ZLocker locker(&_partition->_page_allocator->_lock); // Adjust claimed and capacity to reflect the uncommit - Atomic::sub(&_partition->_claimed, flushed); + AtomicAccess::sub(&_partition->_claimed, flushed); _partition->decrease_capacity(flushed, false /* set_max_capacity */); register_uncommit(flushed); } diff --git a/src/hotspot/share/gc/z/zUnload.cpp b/src/hotspot/share/gc/z/zUnload.cpp index 3dc7ecd3edc41..c8b32385fcdb1 100644 --- a/src/hotspot/share/gc/z/zUnload.cpp +++ b/src/hotspot/share/gc/z/zUnload.cpp @@ -54,7 +54,7 @@ class ZIsUnloadingOopClosure : public OopClosure { virtual void do_oop(oop* p) { // Create local, aligned root - zaddress_unsafe addr = Atomic::load(ZUncoloredRoot::cast(p)); + zaddress_unsafe addr = AtomicAccess::load(ZUncoloredRoot::cast(p)); ZUncoloredRoot::process_no_keepalive(&addr, _color); if (!is_null(addr) && ZHeap::heap()->is_old(safe(addr)) && !ZHeap::heap()->is_object_live(safe(addr))) { diff --git a/src/hotspot/share/gc/z/zVerify.cpp b/src/hotspot/share/gc/z/zVerify.cpp index 53cbcd0a42185..55f13be9b4465 100644 --- a/src/hotspot/share/gc/z/zVerify.cpp +++ b/src/hotspot/share/gc/z/zVerify.cpp @@ -444,7 +444,7 @@ class ZVerifyObjectClosure : public ObjectClosure, public OopFieldClosure { virtual void do_field(oop base, oop* p) { _visited_base = to_zaddress(base); _visited_p = (volatile zpointer*)p; - _visited_ptr_pre_loaded = Atomic::load(_visited_p); + _visited_ptr_pre_loaded = AtomicAccess::load(_visited_p); } }; @@ -652,7 +652,7 @@ class ZVerifyRemsetAfterOopClosure : public BasicOopIterateClosure { virtual void do_oop(oop* p_) { volatile zpointer* const p = (volatile zpointer*)p_; - const zpointer ptr = Atomic::load(p); + const zpointer ptr = AtomicAccess::load(p); // Order this load w.r.t. the was_remembered load which can race when // the remset scanning of the to-space object is concurrently forgetting @@ -695,7 +695,7 @@ class ZVerifyRemsetAfterOopClosure : public BasicOopIterateClosure { } OrderAccess::loadload(); - if (Atomic::load(p) != ptr) { + if (AtomicAccess::load(p) != ptr) { // Order the was_remembered bitmap load w.r.t. the reload of the zpointer. 
// Sometimes the was_remembered() call above races with clearing of the // previous bits, when the to-space object is concurrently forgetting diff --git a/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp b/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp index 7373f451cea3b..ea319b480823e 100644 --- a/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp +++ b/src/hotspot/share/gc/z/zWeakRootsProcessor.cpp @@ -30,7 +30,7 @@ #include "gc/z/zWeakRootsProcessor.hpp" #include "gc/z/zWorkers.hpp" #include "memory/iterator.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" class ZPhantomCleanOopClosure : public OopClosure { diff --git a/src/hotspot/share/gc/z/zWorkers.inline.hpp b/src/hotspot/share/gc/z/zWorkers.inline.hpp index ee1c5f476e31f..0a2e3c7521072 100644 --- a/src/hotspot/share/gc/z/zWorkers.inline.hpp +++ b/src/hotspot/share/gc/z/zWorkers.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,10 +26,10 @@ #include "gc/z/zWorkers.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline bool ZWorkers::should_worker_resize() { - return Atomic::load(&_requested_nworkers) != 0; + return AtomicAccess::load(&_requested_nworkers) != 0; } #endif // SHARE_GC_Z_ZWORKERS_INLINE_HPP diff --git a/src/hotspot/share/interpreter/bytecodeTracer.cpp b/src/hotspot/share/interpreter/bytecodeTracer.cpp index 34170108dc4b0..f9980e389e261 100644 --- a/src/hotspot/share/interpreter/bytecodeTracer.cpp +++ b/src/hotspot/share/interpreter/bytecodeTracer.cpp @@ -33,7 +33,7 @@ #include "oops/constantPool.inline.hpp" #include "oops/method.hpp" #include "oops/methodData.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/osThread.hpp" @@ -184,10 +184,10 @@ static Method* _method_currently_being_printed = nullptr; void BytecodeTracer::trace_interpreter(const methodHandle& method, address bcp, uintptr_t tos, uintptr_t tos2, outputStream* st) { if (TraceBytecodes && BytecodeCounter::counter_value() >= TraceBytecodesAt) { - BytecodePrinter printer(Atomic::load_acquire(&_method_currently_being_printed)); + BytecodePrinter printer(AtomicAccess::load_acquire(&_method_currently_being_printed)); printer.trace(method, bcp, tos, tos2, st); // Save method currently being printed to detect when method printing changes. 
- Atomic::release_store(&_method_currently_being_printed, method()); + AtomicAccess::release_store(&_method_currently_being_printed, method()); } } #endif diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index ae103c8a339ec..3a9b8a3ba0a4a 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -55,7 +55,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" @@ -352,7 +352,7 @@ JRT_ENTRY(void, InterpreterRuntime::throw_StackOverflowError(JavaThread* current vmClasses::StackOverflowError_klass(), CHECK); // Increment counter for hs_err file reporting - Atomic::inc(&Exceptions::_stack_overflow_errors); + AtomicAccess::inc(&Exceptions::_stack_overflow_errors); // Remove the ScopedValue bindings in case we got a StackOverflowError // while we were trying to manipulate ScopedValue bindings. current->clear_scopedValueBindings(); @@ -366,7 +366,7 @@ JRT_ENTRY(void, InterpreterRuntime::throw_delayed_StackOverflowError(JavaThread* java_lang_Throwable::set_message(exception(), Universe::delayed_stack_overflow_error_message()); // Increment counter for hs_err file reporting - Atomic::inc(&Exceptions::_stack_overflow_errors); + AtomicAccess::inc(&Exceptions::_stack_overflow_errors); // Remove the ScopedValue bindings in case we got a StackOverflowError // while we were trying to manipulate ScopedValue bindings. current->clear_scopedValueBindings(); diff --git a/src/hotspot/share/interpreter/oopMapCache.cpp b/src/hotspot/share/interpreter/oopMapCache.cpp index e577ce42c1ea1..29d6825d3e5ac 100644 --- a/src/hotspot/share/interpreter/oopMapCache.cpp +++ b/src/hotspot/share/interpreter/oopMapCache.cpp @@ -30,7 +30,7 @@ #include "memory/resourceArea.hpp" #include "oops/generateOopMap.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/safepoint.hpp" #include "runtime/signature.hpp" @@ -448,11 +448,11 @@ OopMapCache::~OopMapCache() { } OopMapCacheEntry* OopMapCache::entry_at(int i) const { - return Atomic::load_acquire(&(_array[i % size])); + return AtomicAccess::load_acquire(&(_array[i % size])); } bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) { - return Atomic::cmpxchg(&_array[i % size], old, entry) == old; + return AtomicAccess::cmpxchg(&_array[i % size], old, entry) == old; } void OopMapCache::flush() { @@ -562,9 +562,9 @@ void OopMapCache::lookup(const methodHandle& method, void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) { while (true) { - OopMapCacheEntry* head = Atomic::load(&_old_entries); + OopMapCacheEntry* head = AtomicAccess::load(&_old_entries); entry->_next = head; - if (Atomic::cmpxchg(&_old_entries, head, entry) == head) { + if (AtomicAccess::cmpxchg(&_old_entries, head, entry) == head) { // Enqueued successfully. 
break; } @@ -578,7 +578,7 @@ void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) { } bool OopMapCache::has_cleanup_work() { - return Atomic::load(&_old_entries) != nullptr; + return AtomicAccess::load(&_old_entries) != nullptr; } void OopMapCache::try_trigger_cleanup() { @@ -592,7 +592,7 @@ void OopMapCache::try_trigger_cleanup() { } void OopMapCache::cleanup() { - OopMapCacheEntry* entry = Atomic::xchg(&_old_entries, (OopMapCacheEntry*)nullptr); + OopMapCacheEntry* entry = AtomicAccess::xchg(&_old_entries, (OopMapCacheEntry*)nullptr); if (entry == nullptr) { // No work. return; diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp index e9adfce6372bd..74e7b9eec3432 100644 --- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp @@ -51,7 +51,7 @@ #include "oops/typeArrayOop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/basicLock.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/globals.hpp" diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp index 83873e2e50026..74059bcba434b 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp @@ -41,7 +41,7 @@ #include "logging/log.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/orderAccess.hpp" #include "runtime/safepoint.hpp" @@ -88,8 +88,8 @@ void ObjectSampler::oop_storage_gc_notification(size_t num_dead) { // The ObjectSampler instance may have already been cleaned or a new // instance was created concurrently. This allows for a small race where cleaning // could be done again. - Atomic::store(&_dead_samples, true); - Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value()); + AtomicAccess::store(&_dead_samples, true); + AtomicAccess::store(&_last_sweep, (int64_t)JfrTicks::now().value()); } } @@ -113,8 +113,8 @@ ObjectSampler::ObjectSampler(size_t size) : _total_allocated(0), _threshold(0), _size(size) { - Atomic::store(&_dead_samples, false); - Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value()); + AtomicAccess::store(&_dead_samples, false); + AtomicAccess::store(&_last_sweep, (int64_t)JfrTicks::now().value()); } ObjectSampler::~ObjectSampler() { @@ -156,7 +156,7 @@ void ObjectSampler::destroy() { static volatile int _lock = 0; ObjectSampler* ObjectSampler::acquire() { - while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {} + while (AtomicAccess::cmpxchg(&_lock, 0, 1) == 1) {} return _instance; } @@ -240,10 +240,10 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool assert(thread_id != 0, "invariant"); assert(thread != nullptr, "invariant"); - if (Atomic::load(&_dead_samples)) { + if (AtomicAccess::load(&_dead_samples)) { // There's a small race where a GC scan might reset this to true, potentially // causing a back-to-back scavenge. 
- Atomic::store(&_dead_samples, false); + AtomicAccess::store(&_dead_samples, false); scavenge(); } @@ -360,5 +360,5 @@ ObjectSample* ObjectSampler::item_at(int index) { } int64_t ObjectSampler::last_sweep() { - return Atomic::load(&_last_sweep); + return AtomicAccess::load(&_last_sweep); } diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp index 579710a62a7a4..3fca3ad7631ad 100644 --- a/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp +++ b/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp @@ -33,7 +33,7 @@ #include "jfr/utilities/jfrTypes.hpp" #include "jfrfiles/jfrEventClasses.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/osThread.hpp" #include "runtime/safepointMechanism.inline.hpp" @@ -84,11 +84,11 @@ bool JfrCPUTimeTraceQueue::enqueue(JfrCPUTimeSampleRequest& request) { assert(&JavaThread::current()->jfr_thread_local()->cpu_time_jfr_queue() == this, "invariant"); u4 elementIndex; do { - elementIndex = Atomic::load_acquire(&_head); + elementIndex = AtomicAccess::load_acquire(&_head); if (elementIndex >= _capacity) { return false; } - } while (Atomic::cmpxchg(&_head, elementIndex, elementIndex + 1) != elementIndex); + } while (AtomicAccess::cmpxchg(&_head, elementIndex, elementIndex + 1) != elementIndex); _data[elementIndex] = request; return true; } @@ -101,19 +101,19 @@ JfrCPUTimeSampleRequest& JfrCPUTimeTraceQueue::at(u4 index) { static volatile u4 _lost_samples_sum = 0; u4 JfrCPUTimeTraceQueue::size() const { - return Atomic::load_acquire(&_head); + return AtomicAccess::load_acquire(&_head); } void JfrCPUTimeTraceQueue::set_size(u4 size) { - Atomic::release_store(&_head, size); + AtomicAccess::release_store(&_head, size); } u4 JfrCPUTimeTraceQueue::capacity() const { - return Atomic::load_acquire(&_capacity); + return AtomicAccess::load_acquire(&_capacity); } void JfrCPUTimeTraceQueue::set_capacity(u4 capacity) { - if (capacity == Atomic::load(&_capacity)) { + if (capacity == AtomicAccess::load(&_capacity)) { return; } _head = 0; @@ -126,32 +126,32 @@ void JfrCPUTimeTraceQueue::set_capacity(u4 capacity) { } else { _data = nullptr; } - Atomic::release_store(&_capacity, capacity); + AtomicAccess::release_store(&_capacity, capacity); } bool JfrCPUTimeTraceQueue::is_empty() const { - return Atomic::load_acquire(&_head) == 0; + return AtomicAccess::load_acquire(&_head) == 0; } u4 JfrCPUTimeTraceQueue::lost_samples() const { - return Atomic::load(&_lost_samples); + return AtomicAccess::load(&_lost_samples); } void JfrCPUTimeTraceQueue::increment_lost_samples() { - Atomic::inc(&_lost_samples_sum); - Atomic::inc(&_lost_samples); + AtomicAccess::inc(&_lost_samples_sum); + AtomicAccess::inc(&_lost_samples); } void JfrCPUTimeTraceQueue::increment_lost_samples_due_to_queue_full() { - Atomic::inc(&_lost_samples_due_to_queue_full); + AtomicAccess::inc(&_lost_samples_due_to_queue_full); } u4 JfrCPUTimeTraceQueue::get_and_reset_lost_samples() { - return Atomic::xchg(&_lost_samples, (u4)0); + return AtomicAccess::xchg(&_lost_samples, (u4)0); } u4 JfrCPUTimeTraceQueue::get_and_reset_lost_samples_due_to_queue_full() { - return Atomic::xchg(&_lost_samples_due_to_queue_full, (u4)0); + return AtomicAccess::xchg(&_lost_samples_due_to_queue_full, (u4)0); } void JfrCPUTimeTraceQueue::init() { @@ -159,7 +159,7 @@ void JfrCPUTimeTraceQueue::init() { } void 
JfrCPUTimeTraceQueue::clear() { - Atomic::release_store(&_head, (u4)0); + AtomicAccess::release_store(&_head, (u4)0); } void JfrCPUTimeTraceQueue::resize_if_needed() { @@ -167,7 +167,7 @@ void JfrCPUTimeTraceQueue::resize_if_needed() { if (lost_samples_due_to_queue_full == 0) { return; } - u4 capacity = Atomic::load(&_capacity); + u4 capacity = AtomicAccess::load(&_capacity); if (capacity < CPU_TIME_QUEUE_MAX_CAPACITY) { float ratio = (float)lost_samples_due_to_queue_full / (float)capacity; int factor = 1; @@ -246,7 +246,7 @@ class JfrCPUSamplerThread : public NonJavaThread { void recompute_period_if_needed(); void set_throttle(JfrCPUSamplerThrottle& throttle); - int64_t get_sampling_period() const { return Atomic::load(&_current_sampling_period_ns); }; + int64_t get_sampling_period() const { return AtomicAccess::load(&_current_sampling_period_ns); }; void sample_thread(JfrSampleRequest& request, void* ucontext, JavaThread* jt, JfrThreadLocal* tl, JfrTicks& now); @@ -280,11 +280,11 @@ class JfrCPUSamplerThread : public NonJavaThread { #ifdef ASSERT void set_out_of_stack_walking_enabled(bool runnable) { - Atomic::release_store(&_out_of_stack_walking_enabled, runnable); + AtomicAccess::release_store(&_out_of_stack_walking_enabled, runnable); } u8 out_of_stack_walking_iterations() const { - return Atomic::load(&_out_of_stack_walking_iterations); + return AtomicAccess::load(&_out_of_stack_walking_iterations); } #endif }; @@ -302,12 +302,12 @@ JfrCPUSamplerThread::JfrCPUSamplerThread(JfrCPUSamplerThrottle& throttle) : } void JfrCPUSamplerThread::trigger_async_processing_of_cpu_time_jfr_requests() { - Atomic::release_store(&_is_async_processing_of_cpu_time_jfr_requests_triggered, true); + AtomicAccess::release_store(&_is_async_processing_of_cpu_time_jfr_requests_triggered, true); } void JfrCPUSamplerThread::on_javathread_create(JavaThread* thread) { if (thread->is_hidden_from_external_view() || thread->is_JfrRecorder_thread() || - !Atomic::load_acquire(&_signal_handler_installed)) { + !AtomicAccess::load_acquire(&_signal_handler_installed)) { return; } JfrThreadLocal* tl = thread->jfr_thread_local(); @@ -317,7 +317,7 @@ void JfrCPUSamplerThread::on_javathread_create(JavaThread* thread) { if (create_timer_for_thread(thread, timerid)) { tl->set_cpu_timer(&timerid); } else { - if (!Atomic::or_then_fetch(&_warned_about_timer_creation_failure, true)) { + if (!AtomicAccess::or_then_fetch(&_warned_about_timer_creation_failure, true)) { log_warning(jfr)("Failed to create timer for a thread"); } tl->deallocate_cpu_time_jfr_queue(); @@ -350,8 +350,8 @@ void JfrCPUSamplerThread::start_thread() { } void JfrCPUSamplerThread::enroll() { - if (Atomic::cmpxchg(&_disenrolled, true, false)) { - Atomic::store(&_warned_about_timer_creation_failure, false); + if (AtomicAccess::cmpxchg(&_disenrolled, true, false)) { + AtomicAccess::store(&_warned_about_timer_creation_failure, false); initialize_active_signal_handler_counter(); log_trace(jfr)("Enrolling CPU thread sampler"); _sample.signal(); @@ -365,9 +365,9 @@ void JfrCPUSamplerThread::enroll() { } void JfrCPUSamplerThread::disenroll() { - if (!Atomic::cmpxchg(&_disenrolled, false, true)) { + if (!AtomicAccess::cmpxchg(&_disenrolled, false, true)) { log_trace(jfr)("Disenrolling CPU thread sampler"); - if (Atomic::load_acquire(&_signal_handler_installed)) { + if (AtomicAccess::load_acquire(&_signal_handler_installed)) { stop_timer(); stop_signal_handlers(); } @@ -391,9 +391,9 @@ void JfrCPUSamplerThread::run() { recompute_period_if_needed(); last_recompute_check = 
os::javaTimeNanos(); } - DEBUG_ONLY(if (Atomic::load_acquire(&_out_of_stack_walking_enabled)) {) - if (Atomic::cmpxchg(&_is_async_processing_of_cpu_time_jfr_requests_triggered, true, false)) { - DEBUG_ONLY(Atomic::inc(&_out_of_stack_walking_iterations);) + DEBUG_ONLY(if (AtomicAccess::load_acquire(&_out_of_stack_walking_enabled)) {) + if (AtomicAccess::cmpxchg(&_is_async_processing_of_cpu_time_jfr_requests_triggered, true, false)) { + DEBUG_ONLY(AtomicAccess::inc(&_out_of_stack_walking_iterations);) stackwalk_threads_in_native(); } DEBUG_ONLY(}) @@ -450,12 +450,12 @@ void JfrCPUTimeThreadSampling::send_event(const JfrTicks &start_time, traceid si event.set_samplingPeriod(cpu_time_period); event.set_biased(biased); event.commit(); - Atomic::inc(&count); + AtomicAccess::inc(&count); if (biased) { - Atomic::inc(&biased_count); + AtomicAccess::inc(&biased_count); } - if (Atomic::load(&count) % 1000 == 0) { - log_debug(jfr)("CPU thread sampler sent %zu events, lost %d, biased %zu\n", Atomic::load(&count), Atomic::load(&_lost_samples_sum), Atomic::load(&biased_count)); + if (AtomicAccess::load(&count) % 1000 == 0) { + log_debug(jfr)("CPU thread sampler sent %zu events, lost %d, biased %zu\n", AtomicAccess::load(&count), AtomicAccess::load(&_lost_samples_sum), AtomicAccess::load(&biased_count)); } } @@ -703,8 +703,8 @@ bool JfrCPUSamplerThread::create_timer_for_thread(JavaThread* thread, timer_t& t void JfrCPUSamplerThread::stop_signal_handlers() { // set the stop signal bit - Atomic::or_then_fetch(&_active_signal_handlers, STOP_SIGNAL_BIT, memory_order_acq_rel); - while (Atomic::load_acquire(&_active_signal_handlers) > STOP_SIGNAL_BIT) { + AtomicAccess::or_then_fetch(&_active_signal_handlers, STOP_SIGNAL_BIT, memory_order_acq_rel); + while (AtomicAccess::load_acquire(&_active_signal_handlers) > STOP_SIGNAL_BIT) { // wait for all signal handlers to finish os::naked_short_nanosleep(1000); } @@ -713,21 +713,21 @@ void JfrCPUSamplerThread::stop_signal_handlers() { // returns false if the stop signal bit was set, true otherwise bool JfrCPUSamplerThread::increment_signal_handler_count() { // increment the count of active signal handlers - u4 old_value = Atomic::fetch_then_add(&_active_signal_handlers, (u4)1, memory_order_acq_rel); + u4 old_value = AtomicAccess::fetch_then_add(&_active_signal_handlers, (u4)1, memory_order_acq_rel); if ((old_value & STOP_SIGNAL_BIT) != 0) { // if the stop signal bit was set, we are not allowed to increment - Atomic::dec(&_active_signal_handlers, memory_order_acq_rel); + AtomicAccess::dec(&_active_signal_handlers, memory_order_acq_rel); return false; } return true; } void JfrCPUSamplerThread::decrement_signal_handler_count() { - Atomic::dec(&_active_signal_handlers, memory_order_acq_rel); + AtomicAccess::dec(&_active_signal_handlers, memory_order_acq_rel); } void JfrCPUSamplerThread::initialize_active_signal_handler_counter() { - Atomic::release_store(&_active_signal_handlers, (u4)0); + AtomicAccess::release_store(&_active_signal_handlers, (u4)0); } class VM_JFRInitializeCPUTimeSampler : public VM_Operation { @@ -754,7 +754,7 @@ bool JfrCPUSamplerThread::init_timers() { log_error(jfr)("Conflicting SIGPROF handler found: %p. 
CPUTimeSample events will not be recorded", prev_handler); return false; } - Atomic::release_store(&_signal_handler_installed, true); + AtomicAccess::release_store(&_signal_handler_installed, true); VM_JFRInitializeCPUTimeSampler op(this); VMThread::execute(&op); return true; @@ -792,17 +792,17 @@ void JfrCPUSamplerThread::recompute_period_if_needed() { int64_t current_period = get_sampling_period(); int64_t period = _throttle.compute_sampling_period(); if (period != current_period) { - Atomic::store(&_current_sampling_period_ns, period); + AtomicAccess::store(&_current_sampling_period_ns, period); update_all_thread_timers(); } } void JfrCPUSamplerThread::set_throttle(JfrCPUSamplerThrottle& throttle) { _throttle = throttle; - if (_throttle.enabled() && Atomic::load_acquire(&_disenrolled) == false) { + if (_throttle.enabled() && AtomicAccess::load_acquire(&_disenrolled) == false) { recompute_period_if_needed(); } else { - Atomic::store(&_current_sampling_period_ns, _throttle.compute_sampling_period()); + AtomicAccess::store(&_current_sampling_period_ns, _throttle.compute_sampling_period()); } } diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp index 6347abd654fe6..ae79c8bb6e386 100644 --- a/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp +++ b/src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp @@ -32,7 +32,7 @@ #include "jfr/utilities/jfrTryLock.hpp" #include "jfr/utilities/jfrTypes.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -83,8 +83,8 @@ class JfrSamplerThread : public NonJavaThread { virtual const char* name() const { return "JFR Sampler Thread"; } virtual const char* type_name() const { return "JfrSamplerThread"; } bool is_JfrSampler_thread() const { return true; } - int64_t java_period() const { return Atomic::load(&_java_period_millis); }; - int64_t native_period() const { return Atomic::load(&_native_period_millis); }; + int64_t java_period() const { return AtomicAccess::load(&_java_period_millis); }; + int64_t native_period() const { return AtomicAccess::load(&_native_period_millis); }; }; JfrSamplerThread::JfrSamplerThread(int64_t java_period_millis, int64_t native_period_millis, u4 max_frames) : @@ -376,12 +376,12 @@ bool JfrSamplerThread::sample_native_thread(JavaThread* jt) { void JfrSamplerThread::set_java_period(int64_t period_millis) { assert(period_millis >= 0, "invariant"); - Atomic::store(&_java_period_millis, period_millis); + AtomicAccess::store(&_java_period_millis, period_millis); } void JfrSamplerThread::set_native_period(int64_t period_millis) { assert(period_millis >= 0, "invariant"); - Atomic::store(&_native_period_millis, period_millis); + AtomicAccess::store(&_native_period_millis, period_millis); } // JfrThreadSampler; diff --git a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp index b0f4461a82c07..b8ff3ba504a03 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp @@ -51,7 +51,7 @@ #include "logging/log.hpp" #include "memory/iterator.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include 
"runtime/interfaceSupport.inline.hpp" #include "runtime/mutex.hpp" diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp index 7006d6c865a55..2fcb1a64bafd0 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp @@ -31,7 +31,7 @@ #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiThreadState.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/jniHandles.inline.hpp" #include "utilities/growableArray.hpp" @@ -43,7 +43,7 @@ static traceid atomic_inc(traceid volatile* const dest, traceid stride = 1) { do { compare_value = *dest; exchange_value = compare_value + stride; - } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value); + } while (AtomicAccess::cmpxchg(dest, compare_value, exchange_value) != compare_value); return exchange_value; } diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp index 034b8e653cf68..63c4cacc4550f 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp @@ -28,7 +28,7 @@ #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.hpp" #include "oops/method.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/macros.hpp" #ifdef VM_LITTLE_ENDIAN @@ -106,7 +106,7 @@ inline void set_cas_form(uint8_t bits, uint8_t volatile* dest) { do { const uint8_t current = *dest; const uint8_t new_value = op(bits, current); - if (current == new_value || Atomic::cmpxchg(dest, current, new_value) == current) { + if (current == new_value || AtomicAccess::cmpxchg(dest, current, new_value) == current) { return; } } while (true); diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp index a4ada594700ec..cb4b33a36487f 100644 --- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp @@ -24,7 +24,7 @@ #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp" #include "jfr/support/jfrThreadId.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutex.hpp" #include "runtime/safepoint.hpp" @@ -54,14 +54,14 @@ void JfrTraceIdEpoch::shift_epoch() { void JfrTraceIdEpoch::set_method_tracer_tag_state() { assert_locked_or_safepoint(ClassLoaderDataGraph_lock); - Atomic::release_store(&_method_tracer_state, true); + AtomicAccess::release_store(&_method_tracer_state, true); } void JfrTraceIdEpoch::reset_method_tracer_tag_state() { assert_locked_or_safepoint(ClassLoaderDataGraph_lock); - Atomic::release_store(&_method_tracer_state, false); + AtomicAccess::release_store(&_method_tracer_state, false); } bool JfrTraceIdEpoch::has_method_tracer_changed_tag_state() { - return Atomic::load_acquire(&_method_tracer_state); + return AtomicAccess::load_acquire(&_method_tracer_state); } diff --git a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp index 
da61942539365..370214f1440bc 100644 --- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp +++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp @@ -32,7 +32,7 @@ #include "jvm_io.h" #include "logging/log.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutexLocker.hpp" @@ -523,7 +523,7 @@ static bool prepare_for_emergency_dump(Thread* thread) { static volatile int jfr_shutdown_lock = 0; static bool guard_reentrancy() { - return Atomic::cmpxchg(&jfr_shutdown_lock, 0, 1) == 0; + return AtomicAccess::cmpxchg(&jfr_shutdown_lock, 0, 1) == 0; } class JavaThreadInVMAndNative : public StackObj { diff --git a/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp b/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp index b1bddfff466a2..a9ba456ad760f 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp @@ -24,7 +24,7 @@ #include "jfr/recorder/service/jfrPostBox.hpp" #include "jfr/utilities/jfrTryLock.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.hpp" @@ -85,10 +85,10 @@ void JfrPostBox::post(JFR_Msg msg) { void JfrPostBox::deposit(int new_messages) { while (true) { - const int current_msgs = Atomic::load(&_messages); + const int current_msgs = AtomicAccess::load(&_messages); // OR the new message const int exchange_value = current_msgs | new_messages; - const int result = Atomic::cmpxchg(&_messages, current_msgs, exchange_value); + const int result = AtomicAccess::cmpxchg(&_messages, current_msgs, exchange_value); if (result == current_msgs) { return; } @@ -116,7 +116,7 @@ void JfrPostBox::synchronous_post(int msg) { deposit(msg); // serial_id is used to check when what we send in has been processed. // _msg_read_serial is read under JfrMsg_lock protection. 
- const uintptr_t serial_id = Atomic::load(&_msg_read_serial) + 1; + const uintptr_t serial_id = AtomicAccess::load(&_msg_read_serial) + 1; msg_lock.notify_all(); while (!is_message_processed(serial_id)) { msg_lock.wait(); @@ -131,17 +131,17 @@ void JfrPostBox::synchronous_post(int msg) { */ bool JfrPostBox::is_message_processed(uintptr_t serial_id) const { assert(JfrMsg_lock->owned_by_self(), "_msg_handled_serial must be read under JfrMsg_lock protection"); - return serial_id <= Atomic::load(&_msg_handled_serial); + return serial_id <= AtomicAccess::load(&_msg_handled_serial); } bool JfrPostBox::is_empty() const { assert(JfrMsg_lock->owned_by_self(), "not holding JfrMsg_lock!"); - return Atomic::load(&_messages) == 0; + return AtomicAccess::load(&_messages) == 0; } int JfrPostBox::collect() { // get pending and reset to 0 - const int messages = Atomic::xchg(&_messages, 0); + const int messages = AtomicAccess::xchg(&_messages, 0); if (check_waiters(messages)) { _has_waiters = true; assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_read_serial is protected by JfrMsg_lock"); diff --git a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp index e8a3ea935de45..0087980f430ef 100644 --- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp +++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp @@ -46,7 +46,7 @@ #include "jfr/writers/jfrJavaEventWriter.hpp" #include "jfrfiles/jfrEventClasses.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutexLocker.hpp" @@ -67,7 +67,7 @@ class JfrRotationLock : public StackObj { bool _recursive; static bool acquire(Thread* thread) { - if (Atomic::cmpxchg(&_lock, 0, 1) == 0) { + if (AtomicAccess::cmpxchg(&_lock, 0, 1) == 0) { assert(_owner_thread == nullptr, "invariant"); _owner_thread = thread; return true; diff --git a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp index 5c734e5c3690b..a8cd25dd85f20 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp @@ -59,7 +59,7 @@ void JfrBuffer::reinitialize() { } const u1* JfrBuffer::top() const { - return Atomic::load_acquire(&_top); + return AtomicAccess::load_acquire(&_top); } const u1* JfrBuffer::stable_top() const { @@ -73,14 +73,14 @@ const u1* JfrBuffer::stable_top() const { void JfrBuffer::set_top(const u1* new_top) { assert(new_top <= end(), "invariant"); assert(new_top >= start(), "invariant"); - Atomic::release_store(&_top, new_top); + AtomicAccess::release_store(&_top, new_top); } const u1* JfrBuffer::acquire_critical_section_top() const { do { const u1* current_top = stable_top(); assert(current_top != TOP_CRITICAL_SECTION, "invariant"); - if (Atomic::cmpxchg(&_top, current_top, TOP_CRITICAL_SECTION) == current_top) { + if (AtomicAccess::cmpxchg(&_top, current_top, TOP_CRITICAL_SECTION) == current_top) { return current_top; } } while (true); @@ -105,13 +105,13 @@ void JfrBuffer::acquire(const void* id) { const void* current_id; do { current_id = identity(); - } while (current_id != nullptr || Atomic::cmpxchg(&_identity, current_id, id) != current_id); + } while (current_id != nullptr || AtomicAccess::cmpxchg(&_identity, current_id, id) != current_id); } bool JfrBuffer::try_acquire(const void* id) { assert(id != nullptr, 
"invariant"); const void* const current_id = identity(); - return current_id == nullptr && Atomic::cmpxchg(&_identity, current_id, id) == current_id; + return current_id == nullptr && AtomicAccess::cmpxchg(&_identity, current_id, id) == current_id; } void JfrBuffer::set_identity(const void* id) { @@ -123,7 +123,7 @@ void JfrBuffer::set_identity(const void* id) { void JfrBuffer::release() { assert(identity() != nullptr, "invariant"); - Atomic::release_store(&_identity, (const void*)nullptr); + AtomicAccess::release_store(&_identity, (const void*)nullptr); } #ifdef ASSERT @@ -178,7 +178,7 @@ enum FLAG { inline u1 load(const volatile u1* dest) { assert(dest != nullptr, "invariant"); - return Atomic::load_acquire(dest); + return AtomicAccess::load_acquire(dest); } inline void set(u1* dest, u1 data) { diff --git a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.hpp b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.hpp index 7a7e7fdf21fb2..c3e29e00fe626 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrBuffer.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrBuffer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_JFR_RECORDER_STORAGE_JFRBUFFER_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/sizes.hpp" // @@ -112,7 +112,7 @@ class JfrBuffer { void set_pos(u1* new_pos) { assert(new_pos <= end(), "invariant"); - Atomic::release_store(&_pos, new_pos); + AtomicAccess::release_store(&_pos, new_pos); } void set_pos(size_t size) { @@ -135,17 +135,17 @@ class JfrBuffer { } size_t free_size() const { - return end() - Atomic::load_acquire(&_pos); + return end() - AtomicAccess::load_acquire(&_pos); } size_t unflushed_size() const; bool empty() const { - return Atomic::load_acquire(&_pos) == start(); + return AtomicAccess::load_acquire(&_pos) == start(); } const void* identity() const { - return Atomic::load_acquire(&_identity); + return AtomicAccess::load_acquire(&_identity); } // use only if implied owner already diff --git a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp index 18ed2272cfad7..1b6273b78d2b6 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrMemorySpace.inline.hpp @@ -28,7 +28,7 @@ #include "jfr/recorder/storage/jfrMemorySpace.hpp" #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" template class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware> @@ -267,7 +267,7 @@ inline void JfrMemorySpace class RetrievalPolicy, typename FreeListType, typename FullListType, bool epoch_aware> inline void JfrMemorySpace::decrement_free_list_count() { if (is_free_list_cache_limited()) { - Atomic::dec(&_free_list_cache_count); + AtomicAccess::dec(&_free_list_cache_count); } } diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp b/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp index 52a747fdad1bb..f0e4167ca34ba 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp +++ 
b/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp @@ -23,7 +23,7 @@ */ #include "jfr/recorder/storage/jfrStorageControl.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" const size_t max_lease_factor = 2; JfrStorageControl::JfrStorageControl(size_t global_count_total, size_t in_memory_discard_threshold) : @@ -48,7 +48,7 @@ size_t JfrStorageControl::full_count() const { } bool JfrStorageControl::increment_full() { - const size_t result = Atomic::add(&_full_count, (size_t)1); + const size_t result = AtomicAccess::add(&_full_count, (size_t)1); return to_disk() && result > _to_disk_threshold; } @@ -59,12 +59,12 @@ size_t JfrStorageControl::decrement_full() { do { current = _full_count; exchange = current - 1; - } while (Atomic::cmpxchg(&_full_count, current, exchange) != current); + } while (AtomicAccess::cmpxchg(&_full_count, current, exchange) != current); return exchange; } void JfrStorageControl::reset_full() { - Atomic::store(&_full_count, (size_t)0); + AtomicAccess::store(&_full_count, (size_t)0); } bool JfrStorageControl::should_post_buffer_full_message() const { @@ -76,11 +76,11 @@ bool JfrStorageControl::should_discard() const { } size_t JfrStorageControl::global_lease_count() const { - return Atomic::load(&_global_lease_count); + return AtomicAccess::load(&_global_lease_count); } size_t JfrStorageControl::increment_leased() { - return Atomic::add(&_global_lease_count, (size_t)1); + return AtomicAccess::add(&_global_lease_count, (size_t)1); } size_t JfrStorageControl::decrement_leased() { @@ -89,7 +89,7 @@ size_t JfrStorageControl::decrement_leased() { do { current = _global_lease_count; exchange = current - 1; - } while (Atomic::cmpxchg(&_global_lease_count, current, exchange) != current); + } while (AtomicAccess::cmpxchg(&_global_lease_count, current, exchange) != current); return exchange; } diff --git a/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp b/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp index a80c16d2e14f4..5a7cb7e7a7884 100644 --- a/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp +++ b/src/hotspot/share/jfr/recorder/storage/jfrStorageUtils.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "jfr/recorder/storage/jfrStorageUtils.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" template @@ -49,7 +49,7 @@ inline bool DefaultDiscarder::discard(T* t, const u1* data, size_t size) { template inline size_t get_unflushed_size(const u1* top, Type* t) { assert(t != nullptr, "invariant"); - return Atomic::load_acquire(t->pos_address()) - top; + return AtomicAccess::load_acquire(t->pos_address()) - top; } template @@ -152,7 +152,7 @@ template inline bool EpochDispatchOp::process(typename Operation::Type* t) { assert(t != nullptr, "invariant"); const u1* const current_top = _previous_epoch ? 
t->start() : t->top(); - const size_t unflushed_size = Atomic::load_acquire(t->pos_address()) - current_top; + const size_t unflushed_size = AtomicAccess::load_acquire(t->pos_address()) - current_top; assert((intptr_t)unflushed_size >= 0, "invariant"); if (unflushed_size == 0) { return true; diff --git a/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp b/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp index e1733b0ed5adf..430aba572b825 100644 --- a/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp +++ b/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp @@ -37,7 +37,7 @@ #include "jfr/utilities/jfrSignal.hpp" #include "jfr/utilities/jfrTypes.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/safepoint.hpp" diff --git a/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp b/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp index 769a6b4a3e890..22399f42bbb51 100644 --- a/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp +++ b/src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp @@ -30,7 +30,7 @@ #include "jfr/utilities/jfrTimeConverter.hpp" #include "jfr/utilities/jfrTryLock.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include @@ -89,7 +89,7 @@ bool JfrAdaptiveSampler::sample(int64_t timestamp) { } inline const JfrSamplerWindow* JfrAdaptiveSampler::active_window() const { - return Atomic::load_acquire(&_active_window); + return AtomicAccess::load_acquire(&_active_window); } inline int64_t now() { @@ -97,7 +97,7 @@ inline int64_t now() { } inline bool JfrSamplerWindow::is_expired(int64_t timestamp) const { - const int64_t end_ticks = Atomic::load(&_end_ticks); + const int64_t end_ticks = AtomicAccess::load(&_end_ticks); return timestamp == 0 ? 
now() >= end_ticks : timestamp >= end_ticks; } @@ -108,7 +108,7 @@ bool JfrSamplerWindow::sample(int64_t timestamp, bool* expired_window) const { } inline bool JfrSamplerWindow::sample() const { - const size_t ordinal = Atomic::add(&_measured_population_size, static_cast<size_t>(1)); + const size_t ordinal = AtomicAccess::add(&_measured_population_size, static_cast<size_t>(1)); return ordinal <= _projected_population_size && ordinal % _sampling_interval == 0; } @@ -139,7 +139,7 @@ void JfrAdaptiveSampler::rotate(const JfrSamplerWindow* expired) { inline void JfrAdaptiveSampler::install(const JfrSamplerWindow* next) { assert(next != active_window(), "invariant"); - Atomic::release_store(&_active_window, next); + AtomicAccess::release_store(&_active_window, next); } const JfrSamplerWindow* JfrAdaptiveSampler::configure(const JfrSamplerParams& params, const JfrSamplerWindow* expired) { @@ -197,12 +197,12 @@ inline int64_t millis_to_countertime(int64_t millis) { void JfrSamplerWindow::initialize(const JfrSamplerParams& params) { assert(_sampling_interval >= 1, "invariant"); if (params.window_duration_ms == 0) { - Atomic::store(&_end_ticks, static_cast<int64_t>(0)); + AtomicAccess::store(&_end_ticks, static_cast<int64_t>(0)); return; } - Atomic::store(&_measured_population_size, static_cast<size_t>(0)); + AtomicAccess::store(&_measured_population_size, static_cast<size_t>(0)); const int64_t end_ticks = now() + millis_to_countertime(params.window_duration_ms); - Atomic::store(&_end_ticks, end_ticks); + AtomicAccess::store(&_end_ticks, end_ticks); } /* @@ -279,7 +279,7 @@ size_t JfrSamplerWindow::sample_size() const { } size_t JfrSamplerWindow::population_size() const { - return Atomic::load(&_measured_population_size); + return AtomicAccess::load(&_measured_population_size); } intptr_t JfrSamplerWindow::accumulated_debt() const { diff --git a/src/hotspot/share/jfr/support/jfrDeprecationManager.cpp b/src/hotspot/share/jfr/support/jfrDeprecationManager.cpp index 5d2e180cae93f..8886f412bb2f0 100644 --- a/src/hotspot/share/jfr/support/jfrDeprecationManager.cpp +++ b/src/hotspot/share/jfr/support/jfrDeprecationManager.cpp @@ -45,7 +45,7 @@ #include "memory/resourceArea.inline.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/method.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/thread.inline.hpp" @@ -151,11 +151,11 @@ static bool max_limit_not_reached() { static size_t num_edges = 0; size_t compare_value; do { - compare_value = Atomic::load(&num_edges); + compare_value = AtomicAccess::load(&num_edges); if (compare_value == max_num_edges) { return false; } - } while (compare_value != Atomic::cmpxchg(&num_edges, compare_value, compare_value + 1)); + } while (compare_value != AtomicAccess::cmpxchg(&num_edges, compare_value, compare_value + 1)); if (compare_value + 1 == max_num_edges) { log_max_num_edges_reached(); } @@ -304,7 +304,7 @@ static DeprecatedEdgeList::NodePtr _pending_head = nullptr; static DeprecatedEdgeList::NodePtr _pending_tail = nullptr; inline DeprecatedEdgeList::NodePtr pending_head() { - return Atomic::load(&_pending_head); + return AtomicAccess::load(&_pending_head); } // The test for a pending head can be read concurrently from a thread doing class unloading.
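A side note on the pattern in the jfrDeprecationManager.cpp hunks above: they keep a global edge counter below a hard cap with a compare-and-swap retry loop, and publish a pending-list head that other threads may read concurrently. Below is a minimal standalone sketch of the same bounded CAS-increment pattern, written against std::atomic rather than HotSpot's AtomicAccess API so it can be compiled and run on its own; the names kMaxEdges and try_claim_edge are invented for the sketch and are not part of the patch.

#include <atomic>
#include <cstddef>
#include <cstdio>

namespace {

constexpr std::size_t kMaxEdges = 100;      // stand-in for max_num_edges
std::atomic<std::size_t> g_num_edges{0};    // stand-in for the static num_edges counter

// Claim one slot below the cap; return false once the cap has been reached.
bool try_claim_edge() {
  std::size_t current = g_num_edges.load();
  do {
    if (current == kMaxEdges) {
      return false;                         // cap reached, mirror the early return above
    }
    // On failure, compare_exchange_weak reloads 'current' with the observed
    // value, so the cap is re-checked before every retry, just like the
    // compare_value reload in max_limit_not_reached().
  } while (!g_num_edges.compare_exchange_weak(current, current + 1));
  if (current + 1 == kMaxEdges) {
    std::puts("edge limit reached");        // stands in for log_max_num_edges_reached()
  }
  return true;
}

} // namespace

int main() {
  while (try_claim_edge()) {
  }
  std::printf("claimed %zu edges\n", g_num_edges.load());
  return 0;
}

The sketch leaves the memory ordering at the sequentially consistent default; the HotSpot code relies on AtomicAccess::cmpxchg's default ordering, which is at least as strong as anything this illustration needs.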
@@ -317,7 +317,7 @@ inline static bool no_pending_head() { } inline static void set_pending_head(DeprecatedEdgeList::NodePtr head) { - Atomic::store(&_pending_head, head); + AtomicAccess::store(&_pending_head, head); } class PendingListProcessor { diff --git a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp index 037faee1b9f6d..480d28f8c55dc 100644 --- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp +++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp @@ -41,7 +41,7 @@ #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "memory/arena.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/os.hpp" #include "runtime/threadIdentifier.hpp" @@ -322,7 +322,7 @@ void JfrThreadLocal::set(bool* exclusion_field, bool state) { } bool JfrThreadLocal::is_vthread_excluded() const { - return Atomic::load(&_vthread_excluded); + return AtomicAccess::load(&_vthread_excluded); } bool JfrThreadLocal::is_jvm_thread_excluded(const Thread* t) { @@ -337,7 +337,7 @@ void JfrThreadLocal::exclude_vthread(const JavaThread* jt) { void JfrThreadLocal::include_vthread(const JavaThread* jt) { JfrThreadLocal* const tl = jt->jfr_thread_local(); - Atomic::store(&tl->_vthread_epoch, static_cast(0)); + AtomicAccess::store(&tl->_vthread_epoch, static_cast(0)); set(&tl->_vthread_excluded, false); JfrJavaEventWriter::include(vthread_id(jt), jt); } @@ -357,7 +357,7 @@ void JfrThreadLocal::include_jvm_thread(const Thread* t) { } bool JfrThreadLocal::is_excluded() const { - return Atomic::load_acquire(&_vthread) ? is_vthread_excluded(): _jvm_thread_excluded; + return AtomicAccess::load_acquire(&_vthread) ? is_vthread_excluded(): _jvm_thread_excluded; } bool JfrThreadLocal::is_included() const { @@ -401,7 +401,7 @@ void JfrThreadLocal::set_vthread_epoch(const JavaThread* jt, traceid tid, u2 epo assert(is_vthread(jt), "invariant"); assert(!is_non_reentrant(), "invariant"); - Atomic::store(&jt->jfr_thread_local()->_vthread_epoch, epoch); + AtomicAccess::store(&jt->jfr_thread_local()->_vthread_epoch, epoch); oop vthread = jt->vthread(); assert(vthread != nullptr, "invariant"); @@ -427,7 +427,7 @@ void JfrThreadLocal::set_vthread_epoch_checked(const JavaThread* jt, traceid tid traceid JfrThreadLocal::vthread_id(const Thread* t) { assert(t != nullptr, "invariant"); - return Atomic::load(&t->jfr_thread_local()->_vthread_id); + return AtomicAccess::load(&t->jfr_thread_local()->_vthread_id); } traceid JfrThreadLocal::vthread_id_with_epoch_update(const JavaThread* jt) const { @@ -445,13 +445,13 @@ traceid JfrThreadLocal::vthread_id_with_epoch_update(const JavaThread* jt) const u2 JfrThreadLocal::vthread_epoch(const JavaThread* jt) { assert(jt != nullptr, "invariant"); - return Atomic::load(&jt->jfr_thread_local()->_vthread_epoch); + return AtomicAccess::load(&jt->jfr_thread_local()->_vthread_epoch); } bool JfrThreadLocal::should_write() const { const u2 current_generation = JfrTraceIdEpoch::epoch_generation(); - if (Atomic::load(&_generation) != current_generation) { - Atomic::store(&_generation, current_generation); + if (AtomicAccess::load(&_generation) != current_generation) { + AtomicAccess::store(&_generation, current_generation); return true; } return false; @@ -504,7 +504,7 @@ traceid JfrThreadLocal::assign_thread_id(const Thread* t, JfrThreadLocal* tl) { if (t->is_Java_thread()) { tid = load_java_thread_id(t); tl->_jvm_thread_id = tid; - Atomic::store(&tl->_vthread_id, tid); + 
AtomicAccess::store(&tl->_vthread_id, tid); return tid; } tid = static_cast(ThreadIdentifier::next()); @@ -525,7 +525,7 @@ traceid JfrThreadLocal::jvm_thread_id(const Thread* t) { bool JfrThreadLocal::is_vthread(const JavaThread* jt) { assert(jt != nullptr, "invariant"); - return Atomic::load_acquire(&jt->jfr_thread_local()->_vthread) && jt->last_continuation() != nullptr; + return AtomicAccess::load_acquire(&jt->jfr_thread_local()->_vthread) && jt->last_continuation() != nullptr; } int32_t JfrThreadLocal::make_non_reentrant(Thread* t) { @@ -558,18 +558,18 @@ void JfrThreadLocal::on_set_current_thread(JavaThread* jt, oop thread) { assert(thread != nullptr, "invariant"); JfrThreadLocal* const tl = jt->jfr_thread_local(); if (!is_virtual(jt, thread)) { - Atomic::release_store(&tl->_vthread, false); + AtomicAccess::release_store(&tl->_vthread, false); return; } assert(tl->_non_reentrant_nesting == 0, "invariant"); - Atomic::store(&tl->_vthread_id, AccessThreadTraceId::id(thread)); + AtomicAccess::store(&tl->_vthread_id, AccessThreadTraceId::id(thread)); const u2 epoch_raw = AccessThreadTraceId::epoch(thread); const bool excluded = epoch_raw & excluded_bit; - Atomic::store(&tl->_vthread_excluded, excluded); + AtomicAccess::store(&tl->_vthread_excluded, excluded); if (!excluded) { - Atomic::store(&tl->_vthread_epoch, static_cast(epoch_raw & epoch_mask)); + AtomicAccess::store(&tl->_vthread_epoch, static_cast(epoch_raw & epoch_mask)); } - Atomic::release_store(&tl->_vthread, true); + AtomicAccess::release_store(&tl->_vthread, true); } Arena* JfrThreadLocal::dcmd_arena(JavaThread* jt) { @@ -607,21 +607,21 @@ timer_t* JfrThreadLocal::cpu_timer() const { } bool JfrThreadLocal::is_cpu_time_jfr_enqueue_locked() { - return Atomic::load_acquire(&_cpu_time_jfr_locked) == ENQUEUE; + return AtomicAccess::load_acquire(&_cpu_time_jfr_locked) == ENQUEUE; } bool JfrThreadLocal::is_cpu_time_jfr_dequeue_locked() { - return Atomic::load_acquire(&_cpu_time_jfr_locked) == DEQUEUE; + return AtomicAccess::load_acquire(&_cpu_time_jfr_locked) == DEQUEUE; } bool JfrThreadLocal::try_acquire_cpu_time_jfr_enqueue_lock() { - return Atomic::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, ENQUEUE) == UNLOCKED; + return AtomicAccess::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, ENQUEUE) == UNLOCKED; } bool JfrThreadLocal::try_acquire_cpu_time_jfr_dequeue_lock() { CPUTimeLockState got; while (true) { - CPUTimeLockState got = Atomic::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE); + CPUTimeLockState got = AtomicAccess::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE); if (got == UNLOCKED) { return true; // successfully locked for dequeue } @@ -634,21 +634,21 @@ bool JfrThreadLocal::try_acquire_cpu_time_jfr_dequeue_lock() { void JfrThreadLocal::acquire_cpu_time_jfr_dequeue_lock() { SpinYield s; - while (Atomic::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE) != UNLOCKED) { + while (AtomicAccess::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE) != UNLOCKED) { s.wait(); } } void JfrThreadLocal::release_cpu_time_jfr_queue_lock() { - Atomic::release_store(&_cpu_time_jfr_locked, UNLOCKED); + AtomicAccess::release_store(&_cpu_time_jfr_locked, UNLOCKED); } void JfrThreadLocal::set_has_cpu_time_jfr_requests(bool has_requests) { - Atomic::release_store(&_has_cpu_time_jfr_requests, has_requests); + AtomicAccess::release_store(&_has_cpu_time_jfr_requests, has_requests); } bool JfrThreadLocal::has_cpu_time_jfr_requests() { - return Atomic::load_acquire(&_has_cpu_time_jfr_requests); + return 
AtomicAccess::load_acquire(&_has_cpu_time_jfr_requests); } JfrCPUTimeTraceQueue& JfrThreadLocal::cpu_time_jfr_queue() { @@ -660,11 +660,11 @@ void JfrThreadLocal::deallocate_cpu_time_jfr_queue() { } void JfrThreadLocal::set_do_async_processing_of_cpu_time_jfr_requests(bool wants) { - Atomic::release_store(&_do_async_processing_of_cpu_time_jfr_requests, wants); + AtomicAccess::release_store(&_do_async_processing_of_cpu_time_jfr_requests, wants); } bool JfrThreadLocal::wants_async_processing_of_cpu_time_jfr_requests() { - return Atomic::load_acquire(&_do_async_processing_of_cpu_time_jfr_requests); + return AtomicAccess::load_acquire(&_do_async_processing_of_cpu_time_jfr_requests); } #endif diff --git a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp index 8c82dfad8af8f..5fff03d4408c7 100644 --- a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp +++ b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp @@ -30,7 +30,7 @@ #include "jfr/utilities/jfrBlob.hpp" #include "jfr/utilities/jfrTime.hpp" #include "jfr/utilities/jfrTypes.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #ifdef LINUX @@ -169,11 +169,11 @@ class JfrThreadLocal { int sample_state() const { - return Atomic::load_acquire(&_sample_state); + return AtomicAccess::load_acquire(&_sample_state); } void set_sample_state(int state) { - Atomic::release_store(&_sample_state, state); + AtomicAccess::release_store(&_sample_state, state); } Monitor* sample_monitor() { @@ -209,14 +209,14 @@ class JfrThreadLocal { } bool has_enqueued_requests() const { - return Atomic::load_acquire(&_enqueued_requests); + return AtomicAccess::load_acquire(&_enqueued_requests); } void enqueue_request() { assert_lock_strong(sample_monitor()); assert(sample_state() == JAVA_SAMPLE, "invariant"); if (_sample_request_queue.append(_sample_request) == 0) { - Atomic::release_store(&_enqueued_requests, true); + AtomicAccess::release_store(&_enqueued_requests, true); } set_sample_state(NO_SAMPLE); } @@ -226,7 +226,7 @@ class JfrThreadLocal { assert(has_enqueued_requests(), "invariant"); assert(_sample_request_queue.is_nonempty(), "invariant"); _sample_request_queue.clear(); - Atomic::release_store(&_enqueued_requests, false); + AtomicAccess::release_store(&_enqueued_requests, false); } bool has_native_sample_request() const { diff --git a/src/hotspot/share/jfr/support/methodtracer/jfrFilterManager.cpp b/src/hotspot/share/jfr/support/methodtracer/jfrFilterManager.cpp index 00e6b8b6e2abb..d9081efa08cc5 100644 --- a/src/hotspot/share/jfr/support/methodtracer/jfrFilterManager.cpp +++ b/src/hotspot/share/jfr/support/methodtracer/jfrFilterManager.cpp @@ -78,12 +78,12 @@ static void add_previous_filter(const JfrFilter* previous_filter) { } const JfrFilter* JfrFilterManager::current() { - return Atomic::load_acquire(&_current); + return AtomicAccess::load_acquire(&_current); } void JfrFilterManager::install(const JfrFilter* new_filter) { assert(new_filter != nullptr, "invariant"); - add_previous_filter(Atomic::xchg(&_current, new_filter)); + add_previous_filter(AtomicAccess::xchg(&_current, new_filter)); new_filter->log("New filter installed"); } diff --git a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp index faf0baf2bcb2c..a1418f1ffbf40 100644 --- a/src/hotspot/share/jfr/utilities/jfrAllocation.cpp +++ b/src/hotspot/share/jfr/utilities/jfrAllocation.cpp @@ -27,7 +27,7 @@ #include "logging/log.hpp" #include 
"memory/allocation.inline.hpp" #include "nmt/memTracker.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" #include "utilities/nativeCallStack.hpp" @@ -39,7 +39,7 @@ static jlong atomic_add_jlong(jlong value, jlong volatile* const dest) { do { compare_value = *dest; exchange_value = compare_value + value; - } while (Atomic::cmpxchg(dest, compare_value, exchange_value) != compare_value); + } while (AtomicAccess::cmpxchg(dest, compare_value, exchange_value) != compare_value); return exchange_value; } diff --git a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp index 142c63c053382..d597cc08e4c8f 100644 --- a/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp +++ b/src/hotspot/share/jfr/utilities/jfrConcurrentLinkedListHost.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "jfr/utilities/jfrRelation.hpp" #include "jfr/utilities/jfrTypes.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" /* @@ -67,7 +67,7 @@ Node* find_adjacent(Node* head, const Node* tail, Node** predecessor, VersionHan while (true) { Node* current = head; version_handle->checkout(); - Node* next = Atomic::load_acquire(¤t->_next); + Node* next = AtomicAccess::load_acquire(¤t->_next); do { assert(next != nullptr, "invariant"); Node* const unmasked_next = unmask(next); @@ -158,7 +158,7 @@ void JfrConcurrentLinkedListHost::insert_tail // Invariant: [predecessor] --> tail assert(is_marked_for_insertion(predecessor->_next), "invariant"); assert(predecessor != head, "invariant"); - if (Atomic::load_acquire(&last->_next) == predecessor) { + if (AtomicAccess::load_acquire(&last->_next) == predecessor) { /* Even after we store the new node into the last->_next field, there is no race because it is also marked with the insertion bit. 
*/ last->_next = node; @@ -225,7 +225,7 @@ typename Client::Node* JfrConcurrentLinkedListHost excise(successor); find_adjacent(head, tail, &predecessor, version_handle, excise); } - if (last != nullptr && Atomic::load_acquire(&last->_next) == successor) { + if (last != nullptr && AtomicAccess::load_acquire(&last->_next) == successor) { guarantee(!insert_is_head, "invariant"); guarantee(successor_next == tail, "invariant"); LastNode excise; @@ -249,7 +249,7 @@ bool JfrConcurrentLinkedListHost::in_list(con VersionHandle version_handle = _client->get_version_handle(); const Node* current = head; version_handle->checkout(); - const Node* next = Atomic::load_acquire(&current->_next); + const Node* next = AtomicAccess::load_acquire(&current->_next); while (true) { if (!is_marked_for_removal(next)) { if (current == node) { @@ -274,7 +274,7 @@ inline void JfrConcurrentLinkedListHost::iter VersionHandle version_handle = _client->get_version_handle(); NodePtr current = head; version_handle->checkout(); - NodePtr next = Atomic::load_acquire(&current->_next); + NodePtr next = AtomicAccess::load_acquire(&current->_next); while (true) { if (!is_marked_for_removal(next)) { if (!cb.process(current)) { diff --git a/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp b/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp index 11c10ee9002cf..ae744edcb48b6 100644 --- a/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp +++ b/src/hotspot/share/jfr/utilities/jfrConcurrentQueue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ bool JfrConcurrentQueue::initialize() { template inline bool JfrConcurrentQueue::is_empty() const { - return Atomic::load_acquire(&_head._next) == &_tail; + return AtomicAccess::load_acquire(&_head._next) == &_tail; } template diff --git a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp index 530f19ac2c684..0be2d92ed197d 100644 --- a/src/hotspot/share/jfr/utilities/jfrHashtable.hpp +++ b/src/hotspot/share/jfr/utilities/jfrHashtable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "jfr/utilities/jfrAllocation.hpp" #include "nmt/memTracker.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" @@ -59,9 +59,9 @@ class JfrHashtableBucket : public CHeapObj { TableEntry* _entry; TableEntry* get_entry() const { - return (TableEntry*)Atomic::load_acquire(&_entry); + return (TableEntry*)AtomicAccess::load_acquire(&_entry); } - void set_entry(TableEntry* entry) { Atomic::release_store(&_entry, entry);} + void set_entry(TableEntry* entry) { AtomicAccess::release_store(&_entry, entry);} TableEntry** entry_addr() { return &_entry; } }; diff --git a/src/hotspot/share/jfr/utilities/jfrLinkedList.inline.hpp b/src/hotspot/share/jfr/utilities/jfrLinkedList.inline.hpp index 8b5b2a0965c0b..fed379672dc53 100644 --- a/src/hotspot/share/jfr/utilities/jfrLinkedList.inline.hpp +++ b/src/hotspot/share/jfr/utilities/jfrLinkedList.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "jfr/utilities/jfrLinkedList.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" template JfrLinkedList::JfrLinkedList() : _head(nullptr) {} @@ -39,7 +39,7 @@ bool JfrLinkedList::initialize() { template inline NodeType* JfrLinkedList::head() const { - return (NodeType*)Atomic::load_acquire(&_head); + return (NodeType*)AtomicAccess::load_acquire(&_head); } template @@ -59,7 +59,7 @@ inline void JfrLinkedList::add(NodeType* node) { do { next = head(); node->_next = next; - } while (Atomic::cmpxchg(&_head, next, node) != next); + } while (AtomicAccess::cmpxchg(&_head, next, node) != next); } template @@ -70,7 +70,7 @@ inline NodeType* JfrLinkedList::remove() { node = head(); if (node == nullptr) break; next = (NodePtr)node->_next; - } while (Atomic::cmpxchg(&_head, node, next) != node); + } while (AtomicAccess::cmpxchg(&_head, node, next) != node); return node; } @@ -91,7 +91,7 @@ template NodeType* JfrLinkedList::excise(NodeType* prev, NodeType* node) { NodePtr next = (NodePtr)node->_next; if (prev == nullptr) { - prev = Atomic::cmpxchg(&_head, node, next); + prev = AtomicAccess::cmpxchg(&_head, node, next); if (prev == node) { return nullptr; } @@ -123,7 +123,7 @@ NodeType* JfrLinkedList::cut() { NodePtr node; do { node = head(); - } while (Atomic::cmpxchg(&_head, node, (NodeType*)nullptr) != node); + } while (AtomicAccess::cmpxchg(&_head, node, (NodeType*)nullptr) != node); return node; } @@ -136,7 +136,7 @@ void JfrLinkedList::clear() { template inline void JfrLinkedList::add_list(NodeType* first) { assert(head() == nullptr, "invariant"); - Atomic::store(&_head, first); + AtomicAccess::store(&_head, first); } #endif // SHARE_JFR_UTILITIES_JFRLINKEDLIST_INLINE_HPP diff --git a/src/hotspot/share/jfr/utilities/jfrNode.hpp b/src/hotspot/share/jfr/utilities/jfrNode.hpp index de50c05b97b5b..008d4d672506d 100644 --- a/src/hotspot/share/jfr/utilities/jfrNode.hpp +++ b/src/hotspot/share/jfr/utilities/jfrNode.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "jfr/utilities/jfrTypes.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" const uint64_t JFR_NODE_LOGICAL_EXCISION_BIT = 1; const uint64_t JFR_NODE_LOGICAL_INSERTION_BIT = 2; @@ -35,7 +35,7 @@ const uint64_t JFR_NODE_MASK = ~(JFR_NODE_LOGICAL_INSERTION_BIT | JFR_NODE_LOGIC template inline bool cas(Node** address, Node* current, Node* exchange) { - return Atomic::cmpxchg(address, current, exchange) == current; + return AtomicAccess::cmpxchg(address, current, exchange) == current; } template diff --git a/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp b/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp index 18c21c850e7bd..5996ccacf6ce3 100644 --- a/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp +++ b/src/hotspot/share/jfr/utilities/jfrRefCountPointer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_JFR_UTILITIES_JFRREFCOUNTPOINTER_HPP #include "jfr/utilities/jfrAllocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" template class RefCountHandle { @@ -112,11 +112,11 @@ class MultiThreadedRefCounter { MultiThreadedRefCounter() : _refs(0) {} void inc() const { - Atomic::inc(&_refs, memory_order_relaxed); + AtomicAccess::inc(&_refs, memory_order_relaxed); } bool dec() const { - if (0 == Atomic::sub(&_refs, 1, memory_order_release)) { + if (0 == AtomicAccess::sub(&_refs, 1, memory_order_release)) { OrderAccess::acquire(); return true; } @@ -124,7 +124,7 @@ class MultiThreadedRefCounter { } intptr_t current() const { - return Atomic::load(&_refs); + return AtomicAccess::load(&_refs); } }; diff --git a/src/hotspot/share/jfr/utilities/jfrSignal.hpp b/src/hotspot/share/jfr/utilities/jfrSignal.hpp index acfc26c5abe6d..99690ba689199 100644 --- a/src/hotspot/share/jfr/utilities/jfrSignal.hpp +++ b/src/hotspot/share/jfr/utilities/jfrSignal.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_JFR_UTILITIES_JFRSIGNAL_HPP #define SHARE_JFR_UTILITIES_JFRSIGNAL_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" class JfrSignal { private: @@ -34,15 +34,15 @@ class JfrSignal { JfrSignal() : _signaled(false) {} void signal() const { - Atomic::release_store(&_signaled, true); + AtomicAccess::release_store(&_signaled, true); } void reset() const { - Atomic::release_store(&_signaled, false); + AtomicAccess::release_store(&_signaled, false); } bool is_signaled() const { - return Atomic::load_acquire(&_signaled); + return AtomicAccess::load_acquire(&_signaled); } void signal_if_not_set() const { diff --git a/src/hotspot/share/jfr/utilities/jfrTryLock.hpp b/src/hotspot/share/jfr/utilities/jfrTryLock.hpp index 513e550348b8b..4115c41046dc0 100644 --- a/src/hotspot/share/jfr/utilities/jfrTryLock.hpp +++ b/src/hotspot/share/jfr/utilities/jfrTryLock.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_JFR_UTILITIES_JFRTRYLOCK_HPP #define SHARE_JFR_UTILITIES_JFRTRYLOCK_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" #include "utilities/debug.hpp" @@ -36,7 +36,7 @@ class JfrTryLock { bool _acquired; public: - JfrTryLock(volatile int* lock) : _lock(lock), _acquired(Atomic::cmpxchg(lock, 0, 1) == 0) {} + JfrTryLock(volatile int* lock) : _lock(lock), _acquired(AtomicAccess::cmpxchg(lock, 0, 1) == 0) {} ~JfrTryLock() { if (_acquired) { diff --git a/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp b/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp index d61a9948c9547..bc8ed75d567af 100644 --- a/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp +++ b/src/hotspot/share/jfr/utilities/jfrVersionSystem.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
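For reference, the JfrTryLock constructor above is a one-shot try-lock: a single 0 -> 1 cmpxchg decides ownership, and the destructor stores 0 to release. A small RAII sketch of that idiom with std::atomic (class and member names are placeholders, not JFR types); the release store on unlock pairs with the acquire on the next successful acquisition, which is what lets the lock publish the writes it protected:

#include <atomic>

class TryLockSketch {
  std::atomic<int>& _lock;
  bool _acquired;
 public:
  explicit TryLockSketch(std::atomic<int>& lock) : _lock(lock), _acquired(false) {
    int expected = 0;
    // Exactly one thread can move the lock word from 0 to 1.
    _acquired = _lock.compare_exchange_strong(expected, 1, std::memory_order_acquire);
  }
  ~TryLockSketch() {
    if (_acquired) {
      _lock.store(0, std::memory_order_release);  // unlock, publishing protected writes
    }
  }
  bool has_lock() const { return _acquired; }
};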
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "jfr/utilities/jfrVersionSystem.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" inline JfrVersionSystem::JfrVersionSystem() : _tip(), _head(nullptr) { @@ -50,7 +50,7 @@ inline void JfrVersionSystem::reset() { } inline JfrVersionSystem::Type JfrVersionSystem::tip() const { - return Atomic::load(&_tip._value); + return AtomicAccess::load(&_tip._value); } inline JfrVersionSystem::Type JfrVersionSystem::inc_tip() { @@ -59,7 +59,7 @@ inline JfrVersionSystem::Type JfrVersionSystem::inc_tip() { do { cmp = _tip._value; xchg = cmp + 1; - } while (Atomic::cmpxchg(&_tip._value, cmp, xchg) != cmp); + } while (AtomicAccess::cmpxchg(&_tip._value, cmp, xchg) != cmp); return xchg; } @@ -67,7 +67,7 @@ inline JfrVersionSystem::NodePtr JfrVersionSystem::acquire() { NodePtr node = _head; // free while (node != nullptr) { - if (node->_live || Atomic::cmpxchg(&node->_live, false, true)) { + if (node->_live || AtomicAccess::cmpxchg(&node->_live, false, true)) { node = node->_next; continue; } @@ -80,7 +80,7 @@ inline JfrVersionSystem::NodePtr JfrVersionSystem::acquire() { do { next = _head; node->_next = next; - } while (Atomic::cmpxchg(&_head, next, node) != next); + } while (AtomicAccess::cmpxchg(&_head, next, node) != next); DEBUG_ONLY(assert_state(node);) return node; } @@ -96,7 +96,7 @@ inline traceid JfrVersionSystem::Node::version() const { } inline void JfrVersionSystem::Node::set(traceid version) const { - Atomic::release_store_fence(&_version, version); + AtomicAccess::release_store_fence(&_version, version); } inline void JfrVersionSystem::Node::add_ref() const { @@ -130,7 +130,7 @@ inline JfrVersionSystem::NodePtr JfrVersionSystem::synchronize_with(JfrVersionSystem::Type version, JfrVersionSystem::NodePtr node) const { assert(version <= tip(), "invariant"); while (node != nullptr) { - const Type checkedout = Atomic::load_acquire(&node->_version); + const Type checkedout = AtomicAccess::load_acquire(&node->_version); if (checkedout > 0 && checkedout < version) { return node; } diff --git a/src/hotspot/share/jvmci/jvmci.cpp b/src/hotspot/share/jvmci/jvmci.cpp index b59c7fa34448a..e9f247f4f029b 100644 --- a/src/hotspot/share/jvmci/jvmci.cpp +++ b/src/hotspot/share/jvmci/jvmci.cpp @@ -34,7 +34,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/os.hpp" #include "utilities/events.hpp" @@ -361,7 +361,7 @@ void JVMCI::fatal_log(const char* buf, size_t count) { intx current_thread_id = os::current_thread_id(); intx invalid_id = -1; int log_fd; - if (_first_error_tid == invalid_id && Atomic::cmpxchg(&_first_error_tid, invalid_id, current_thread_id) == invalid_id) { + if (_first_error_tid == invalid_id && AtomicAccess::cmpxchg(&_first_error_tid, invalid_id, current_thread_id) == invalid_id) { if (ErrorFileToStdout) { log_fd = 1; } else if (ErrorFileToStderr) { diff --git a/src/hotspot/share/jvmci/jvmciCompiler.cpp b/src/hotspot/share/jvmci/jvmciCompiler.cpp index 659297973e547..2e84f6830b8d7 100644 --- a/src/hotspot/share/jvmci/jvmciCompiler.cpp +++ b/src/hotspot/share/jvmci/jvmciCompiler.cpp @@ -215,20 +215,20 @@ void JVMCICompiler::CodeInstallStats::print_on(outputStream* st, const char* pre } void JVMCICompiler::CodeInstallStats::on_install(CodeBlob* cb) { - Atomic::inc(&_count); - 
Atomic::add(&_codeBlobs_size, cb->size()); - Atomic::add(&_codeBlobs_code_size, cb->code_size()); + AtomicAccess::inc(&_count); + AtomicAccess::add(&_codeBlobs_size, cb->size()); + AtomicAccess::add(&_codeBlobs_code_size, cb->code_size()); } void JVMCICompiler::inc_methods_compiled() { - Atomic::inc(&_methods_compiled); - Atomic::inc(&_global_compilation_ticks); + AtomicAccess::inc(&_methods_compiled); + AtomicAccess::inc(&_global_compilation_ticks); } void JVMCICompiler::on_upcall(const char* error, JVMCICompileState* compile_state) { if (error != nullptr) { - Atomic::inc(&_err_upcalls); + AtomicAccess::inc(&_err_upcalls); int ok = _ok_upcalls; int err = _err_upcalls; // If there have been at least 10 upcalls with an error @@ -257,10 +257,10 @@ void JVMCICompiler::on_upcall(const char* error, JVMCICompileState* compile_stat } JVMCI_event_1("JVMCI upcall had an error: %s", error); } else { - Atomic::inc(&_ok_upcalls); + AtomicAccess::inc(&_ok_upcalls); } } void JVMCICompiler::inc_global_compilation_ticks() { - Atomic::inc(&_global_compilation_ticks); + AtomicAccess::inc(&_global_compilation_ticks); } diff --git a/src/hotspot/share/jvmci/jvmciCompiler.hpp b/src/hotspot/share/jvmci/jvmciCompiler.hpp index 0d03bd08bf65b..11427b975f320 100644 --- a/src/hotspot/share/jvmci/jvmciCompiler.hpp +++ b/src/hotspot/share/jvmci/jvmciCompiler.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "compiler/abstractCompiler.hpp" #include "compiler/compiler_globals.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" class JVMCICompileState; diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index dbc3ebd9c6e37..36a6fae794a91 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -56,7 +56,7 @@ #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index 33aa40acbc24b..c7c3a00a127bb 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -47,7 +47,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/frame.inline.hpp" @@ -1622,7 +1622,7 @@ void JVMCIRuntime::fatal_exception(JVMCIEnv* JVMCIENV, const char* message) { JavaThread* THREAD = JavaThread::current(); // For exception macros. 
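The fatal_log() hunk above uses the "first caller wins" guard: a cheap racy read of _first_error_tid followed by a cmpxchg, so that exactly one thread claims the error path (JVMCIRuntime::fatal_exception below applies the same idiom to a plain int flag). A compact sketch with std::atomic, using placeholder names and a stand-in for the actual logging:

#include <atomic>
#include <cstdio>

static std::atomic<long> first_error_tid{-1};   // -1 means no error reported yet

static void report_once(long current_tid, const char* msg) {
  long expected = -1;
  // Racy pre-check avoids the CAS on the common path; the CAS makes it exact.
  if (first_error_tid.load(std::memory_order_relaxed) == -1 &&
      first_error_tid.compare_exchange_strong(expected, current_tid)) {
    std::fputs(msg, stderr);                    // only the winning thread reports
  }
}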
static volatile int report_error = 0; - if (!report_error && Atomic::cmpxchg(&report_error, 0, 1) == 0) { + if (!report_error && AtomicAccess::cmpxchg(&report_error, 0, 1) == 0) { // Only report an error once tty->print_raw_cr(message); if (JVMCIENV != nullptr) { diff --git a/src/hotspot/share/jvmci/metadataHandles.cpp b/src/hotspot/share/jvmci/metadataHandles.cpp index 3df7ebe573ae9..fe4a49a8536c4 100644 --- a/src/hotspot/share/jvmci/metadataHandles.cpp +++ b/src/hotspot/share/jvmci/metadataHandles.cpp @@ -23,7 +23,7 @@ #include "classfile/classLoaderData.hpp" #include "jvmci/metadataHandles.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" jmetadata MetadataHandles::allocate_metadata_handle(Metadata* obj) { assert(obj->is_valid() && obj->is_metadata(), "must be"); @@ -154,7 +154,7 @@ void MetadataHandles::do_unloading() { // but can't be put on the free list yet. The // HandleCleaner will set this to null and // put it on the free list. - jlong old_value = Atomic::cmpxchg((jlong*)handle, (jlong) value, (jlong) (ptr_tag)); + jlong old_value = AtomicAccess::cmpxchg((jlong*)handle, (jlong) value, (jlong) (ptr_tag)); if (old_value == (jlong) value) { // Success } else { diff --git a/src/hotspot/share/logging/logAsyncWriter.cpp b/src/hotspot/share/logging/logAsyncWriter.cpp index d184827f58281..a95df59e264e7 100644 --- a/src/hotspot/share/logging/logAsyncWriter.cpp +++ b/src/hotspot/share/logging/logAsyncWriter.cpp @@ -28,7 +28,7 @@ #include "logging/logFileStreamOutput.hpp" #include "memory/allocation.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" class AsyncLogWriter::Locker : public StackObj { Thread*& _holder; @@ -324,7 +324,7 @@ void AsyncLogWriter::initialize() { for (LogTagSet* ts = LogTagSet::first(); ts != nullptr; ts = ts->next()) { ts->wait_until_no_readers(); } - Atomic::release_store_fence(&AsyncLogWriter::_instance, self); + AtomicAccess::release_store_fence(&AsyncLogWriter::_instance, self); os::start_thread(self); log_debug(logging, thread)("Async logging thread started."); } else { diff --git a/src/hotspot/share/logging/logDecorations.cpp b/src/hotspot/share/logging/logDecorations.cpp index e08c6c9cb137d..267a28ec44cb5 100644 --- a/src/hotspot/share/logging/logDecorations.cpp +++ b/src/hotspot/share/logging/logDecorations.cpp @@ -24,7 +24,7 @@ #include "jvm.h" #include "logging/logConfiguration.hpp" #include "logging/logDecorations.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/os.hpp" #include "services/management.hpp" @@ -33,12 +33,12 @@ const char* volatile LogDecorations::_host_name = nullptr; const int LogDecorations::_pid = os::current_process_id(); // This is safe to call during dynamic initialization. 
const char* LogDecorations::host_name() { - const char* host_name = Atomic::load_acquire(&_host_name); + const char* host_name = AtomicAccess::load_acquire(&_host_name); if (host_name == nullptr) { char buffer[1024]; if (os::get_host_name(buffer, sizeof(buffer))) { host_name = os::strdup_check_oom(buffer); - const char* old_value = Atomic::cmpxchg(&_host_name, (const char*)nullptr, host_name); + const char* old_value = AtomicAccess::cmpxchg(&_host_name, (const char*)nullptr, host_name); if (old_value != nullptr) { os::free((void *) host_name); host_name = old_value; diff --git a/src/hotspot/share/logging/logOutputList.cpp b/src/hotspot/share/logging/logOutputList.cpp index fab06860a4807..4ae072f1d1bbd 100644 --- a/src/hotspot/share/logging/logOutputList.cpp +++ b/src/hotspot/share/logging/logOutputList.cpp @@ -24,25 +24,25 @@ #include "logging/logLevel.hpp" #include "logging/logOutputList.hpp" #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "utilities/globalDefinitions.hpp" jint LogOutputList::increase_readers() { - jint result = Atomic::add(&_active_readers, 1); + jint result = AtomicAccess::add(&_active_readers, 1); assert(_active_readers > 0, "Ensure we have consistent state"); return result; } jint LogOutputList::decrease_readers() { - jint result = Atomic::add(&_active_readers, -1); + jint result = AtomicAccess::add(&_active_readers, -1); assert(result >= 0, "Ensure we have consistent state"); return result; } void LogOutputList::wait_until_no_readers() const { OrderAccess::storeload(); - while (Atomic::load(&_active_readers) != 0) { + while (AtomicAccess::load(&_active_readers) != 0) { // Busy wait } // Prevent mutations to the output list to float above the active reader check. @@ -133,16 +133,16 @@ void LogOutputList::add_output(LogOutput* output, LogLevelType level) { // Update the _level_start index for (int l = LogLevel::Last; l >= level; l--) { - LogOutputNode* lnode = Atomic::load(&_level_start[l]); + LogOutputNode* lnode = AtomicAccess::load(&_level_start[l]); if (lnode == nullptr || lnode->_level < level) { - Atomic::store(&_level_start[l], node); + AtomicAccess::store(&_level_start[l], node); } } // Add the node the list - for (LogOutputNode* cur = Atomic::load(&_level_start[LogLevel::Last]); cur != nullptr; cur = Atomic::load(&cur->_next)) { - if (cur != node && Atomic::load(&cur->_next) == node->_next) { - Atomic::store(&cur->_next, node); + for (LogOutputNode* cur = AtomicAccess::load(&_level_start[LogLevel::Last]); cur != nullptr; cur = AtomicAccess::load(&cur->_next)) { + if (cur != node && AtomicAccess::load(&cur->_next) == node->_next) { + AtomicAccess::store(&cur->_next, node); break; } } diff --git a/src/hotspot/share/logging/logOutputList.hpp b/src/hotspot/share/logging/logOutputList.hpp index e081c02b2b68f..1e2299e92650d 100644 --- a/src/hotspot/share/logging/logOutputList.hpp +++ b/src/hotspot/share/logging/logOutputList.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
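LogDecorations::host_name() above shows the publish-once-pointer idiom: compute the value outside any lock, try to install it with a nullptr -> pointer cmpxchg, and free the local copy if another thread won the race; readers pair an acquire load with that publication. A hedged sketch in portable C++ (the malloc/strcpy pair stands in for os::strdup_check_oom and os::free, which are VM-internal; error handling elided):

#include <atomic>
#include <cstdlib>
#include <cstring>

static std::atomic<const char*> cached_name{nullptr};

static const char* publish_once(const char* computed) {
  const char* name = cached_name.load(std::memory_order_acquire);
  if (name == nullptr) {
    char* candidate = static_cast<char*>(std::malloc(std::strlen(computed) + 1));
    std::strcpy(candidate, computed);
    const char* expected = nullptr;
    if (cached_name.compare_exchange_strong(expected, candidate,
                                            std::memory_order_release,
                                            std::memory_order_acquire)) {
      name = candidate;            // we published our copy
    } else {
      std::free(candidate);        // another thread won; adopt its value
      name = expected;             // the failure path leaves the winner in `expected`
    }
  }
  return name;
}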
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "logging/logLevel.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" class LogOutput; @@ -127,7 +127,7 @@ class LogOutputList { void operator++(int) { // FIXME: memory_order_consume could be used here. // Atomic access on the reading side for LogOutputList. - _current = Atomic::load_acquire(&_current->_next); + _current = AtomicAccess::load_acquire(&_current->_next); } bool operator!=(const LogOutputNode *ref) const { @@ -143,7 +143,7 @@ class LogOutputList { increase_readers(); // FIXME: memory_order_consume could be used here. // Atomic access on the reading side for LogOutputList. - return Iterator(this, Atomic::load_acquire(&_level_start[level])); + return Iterator(this, AtomicAccess::load_acquire(&_level_start[level])); } LogOutputNode* end() const { diff --git a/src/hotspot/share/logging/logTagSet.cpp b/src/hotspot/share/logging/logTagSet.cpp index 4719d0a926ee4..667c76ec306db 100644 --- a/src/hotspot/share/logging/logTagSet.cpp +++ b/src/hotspot/share/logging/logTagSet.cpp @@ -76,7 +76,7 @@ void LogTagSet::log(LogLevelType level, const char* msg) { // happen before the creation of LogDecorations instance so // wait_until_no_readers() in LogConfiguration::configure_output() // synchronizes _decorations as well. The order is guaranteed by - // the implied memory order of Atomic::add(). + // the implied memory order of AtomicAccess::add(). LogOutputList::Iterator it = _output_list.iterator(level); LogDecorations decorations(level, *this, _decorators); diff --git a/src/hotspot/share/memory/allocation.inline.hpp b/src/hotspot/share/memory/allocation.inline.hpp index 01af1616ce18f..5561cdbe6f718 100644 --- a/src/hotspot/share/memory/allocation.inline.hpp +++ b/src/hotspot/share/memory/allocation.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" @@ -41,8 +41,8 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) { #ifdef _LP64 *dest += add_value; #else - julong value = Atomic::load(dest); - Atomic::store(dest, value + add_value); + julong value = AtomicAccess::load(dest); + AtomicAccess::store(dest, value + add_value); #endif } #endif diff --git a/src/hotspot/share/memory/heap.hpp b/src/hotspot/share/memory/heap.hpp index d0e4230fe2b21..5056f0f6c2151 100644 --- a/src/hotspot/share/memory/heap.hpp +++ b/src/hotspot/share/memory/heap.hpp @@ -28,7 +28,7 @@ #include "code/codeBlob.hpp" #include "memory/allocation.hpp" #include "memory/virtualspace.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/macros.hpp" class ReservedSpace; @@ -213,7 +213,7 @@ class CodeHeap : public CHeapObj { int adapter_count() { return _adapter_count; } void set_adapter_count(int count) { _adapter_count = count; } int full_count() { return _full_count; } - int report_full() { return Atomic::add(&_full_count, 1); } + int report_full() { return AtomicAccess::add(&_full_count, 1); } private: size_t heap_unallocated_capacity() const; diff --git a/src/hotspot/share/memory/heapInspection.cpp b/src/hotspot/share/memory/heapInspection.cpp index 4d7e6c9369c39..aae3c99a63461 100644 --- a/src/hotspot/share/memory/heapInspection.cpp +++ b/src/hotspot/share/memory/heapInspection.cpp @@ -34,7 +34,7 @@ #include "memory/universe.hpp" #include "nmt/memTracker.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -539,7 +539,7 @@ class RecordInstanceClosure : public ObjectClosure { void ParHeapInspectTask::work(uint worker_id) { uintx missed_count = 0; bool merge_success = true; - if (!Atomic::load(&_success)) { + if (!AtomicAccess::load(&_success)) { // other worker has failed on parallel iteration. 
return; } @@ -547,7 +547,7 @@ void ParHeapInspectTask::work(uint worker_id) { KlassInfoTable cit(false); if (cit.allocation_failed()) { // fail to allocate memory, stop parallel mode - Atomic::store(&_success, false); + AtomicAccess::store(&_success, false); return; } RecordInstanceClosure ric(&cit, _filter); @@ -558,9 +558,9 @@ void ParHeapInspectTask::work(uint worker_id) { merge_success = _shared_cit->merge(&cit); } if (merge_success) { - Atomic::add(&_missed_count, missed_count); + AtomicAccess::add(&_missed_count, missed_count); } else { - Atomic::store(&_success, false); + AtomicAccess::store(&_success, false); } } diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index 2a744c12c0195..1e3b8d0594f0e 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -54,7 +54,7 @@ #include "oops/compressedKlass.inline.hpp" #include "oops/compressedOops.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" @@ -319,7 +319,7 @@ size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) { } size_t MetaspaceGC::capacity_until_GC() { - size_t value = Atomic::load_acquire(&_capacity_until_GC); + size_t value = AtomicAccess::load_acquire(&_capacity_until_GC); assert(value >= MetaspaceSize, "Not initialized properly?"); return value; } @@ -353,7 +353,7 @@ bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size if (can_retry != nullptr) { *can_retry = true; } - size_t prev_value = Atomic::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value); + size_t prev_value = AtomicAccess::cmpxchg(&_capacity_until_GC, old_capacity_until_GC, new_value); if (old_capacity_until_GC != prev_value) { return false; @@ -371,7 +371,7 @@ bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size size_t MetaspaceGC::dec_capacity_until_GC(size_t v) { assert_is_aligned(v, Metaspace::commit_alignment()); - return Atomic::sub(&_capacity_until_GC, v); + return AtomicAccess::sub(&_capacity_until_GC, v); } void MetaspaceGC::initialize() { diff --git a/src/hotspot/share/memory/metaspace/counters.hpp b/src/hotspot/share/memory/metaspace/counters.hpp index c7c841df42373..67789a9f7afa9 100644 --- a/src/hotspot/share/memory/metaspace/counters.hpp +++ b/src/hotspot/share/memory/metaspace/counters.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -26,7 +26,7 @@ #ifndef SHARE_MEMORY_METASPACE_COUNTERS_HPP #define SHARE_MEMORY_METASPACE_COUNTERS_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -93,22 +93,22 @@ class AbstractAtomicCounter { AbstractAtomicCounter() : _c(0) {} - T get() const { return Atomic::load(&_c); } + T get() const { return AtomicAccess::load(&_c); } void increment() { - Atomic::inc(&_c, memory_order_relaxed); + AtomicAccess::inc(&_c, memory_order_relaxed); } void decrement() { - Atomic::dec(&_c, memory_order_relaxed); + AtomicAccess::dec(&_c, memory_order_relaxed); } void increment_by(T v) { - Atomic::add(&_c, v, memory_order_relaxed); + AtomicAccess::add(&_c, v, memory_order_relaxed); } void decrement_by(T v) { - Atomic::sub(&_c, v, memory_order_relaxed); + AtomicAccess::sub(&_c, v, memory_order_relaxed); } #ifdef ASSERT diff --git a/src/hotspot/share/memory/metaspace/internalStats.hpp b/src/hotspot/share/memory/metaspace/internalStats.hpp index 9c127568e576d..633ef5a96bf0f 100644 --- a/src/hotspot/share/memory/metaspace/internalStats.hpp +++ b/src/hotspot/share/memory/metaspace/internalStats.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -27,7 +27,7 @@ #define SHARE_MEMORY_METASPACE_INTERNALSTATS_HPP #include "memory/allStatic.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" class outputStream; @@ -108,7 +108,7 @@ class InternalStats : public AllStatic { // incrementors #define INCREMENTOR(name) static void inc_##name() { _##name++; } -#define INCREMENTOR_ATOMIC(name) static void inc_##name() { Atomic::inc(&_##name); } +#define INCREMENTOR_ATOMIC(name) static void inc_##name() { AtomicAccess::inc(&_##name); } ALL_MY_COUNTERS(INCREMENTOR, INCREMENTOR_ATOMIC) #undef INCREMENTOR #undef INCREMENTOR_ATOMIC diff --git a/src/hotspot/share/memory/metaspace/metaspaceArena.cpp b/src/hotspot/share/memory/metaspace/metaspaceArena.cpp index 709bbfd90a119..8fc89a40203fa 100644 --- a/src/hotspot/share/memory/metaspace/metaspaceArena.cpp +++ b/src/hotspot/share/memory/metaspace/metaspaceArena.cpp @@ -38,7 +38,7 @@ #include "memory/metaspace/metaspaceSettings.hpp" #include "memory/metaspace/metaspaceStatistics.hpp" #include "memory/metaspace/virtualSpaceList.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/init.hpp" #include "runtime/mutexLocker.hpp" #include "services/memoryService.hpp" diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp index c4b112defb44e..9fec3f704ca53 100644 --- a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp +++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp @@ -33,7 +33,7 @@ #include "memory/metaspace/metaspaceContext.hpp" #include "memory/metaspace/virtualSpaceList.hpp" #include "memory/metaspace/virtualSpaceNode.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" namespace metaspace { @@ -98,7 +98,7 @@ void VirtualSpaceList::create_new_node() { _commit_limiter, &_reserved_words_counter, &_committed_words_counter); vsn->set_next(_first_node); - Atomic::release_store(&_first_node, vsn); 
+ AtomicAccess::release_store(&_first_node, vsn); _nodes_counter.increment(); } @@ -189,7 +189,7 @@ void VirtualSpaceList::verify() const { // Returns true if this pointer is contained in one of our nodes. bool VirtualSpaceList::contains(const MetaWord* p) const { // Note: needs to work without locks. - const VirtualSpaceNode* vsn = Atomic::load_acquire(&_first_node); + const VirtualSpaceNode* vsn = AtomicAccess::load_acquire(&_first_node); while (vsn != nullptr) { if (vsn->contains(p)) { return true; diff --git a/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp b/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp index b5eff08e740e7..325d708bf2bbb 100644 --- a/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp +++ b/src/hotspot/share/memory/metaspaceCriticalAllocation.cpp @@ -77,7 +77,7 @@ MetadataAllocationRequest* MetaspaceCriticalAllocation::_requests_tail = nullptr void MetaspaceCriticalAllocation::add(MetadataAllocationRequest* request) { MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag); log_info(metaspace)("Requesting critical metaspace allocation; almost out of memory"); - Atomic::store(&_has_critical_allocation, true); + AtomicAccess::store(&_has_critical_allocation, true); // This is called by the request constructor to insert the request into the // global list. The request's destructor will remove the request from the // list. gcc13 has a false positive warning about the local request being @@ -179,7 +179,7 @@ void MetaspaceCriticalAllocation::wait_for_purge(MetadataAllocationRequest* requ } void MetaspaceCriticalAllocation::block_if_concurrent_purge() { - if (Atomic::load(&_has_critical_allocation)) { + if (AtomicAccess::load(&_has_critical_allocation)) { // If there is a concurrent Metaspace::purge() operation, we will block here, // to make sure critical allocations get precedence and don't get starved. MutexLocker ml(MetaspaceCritical_lock, Mutex::_no_safepoint_check_flag); @@ -205,7 +205,7 @@ void MetaspaceCriticalAllocation::process() { curr->set_result(result); } if (all_satisfied) { - Atomic::store(&_has_critical_allocation, false); + AtomicAccess::store(&_has_critical_allocation, false); } MetaspaceCritical_lock->notify_all(); } diff --git a/src/hotspot/share/memory/resourceArea.cpp b/src/hotspot/share/memory/resourceArea.cpp index 7b0de1fd71708..bab652c17ea96 100644 --- a/src/hotspot/share/memory/resourceArea.cpp +++ b/src/hotspot/share/memory/resourceArea.cpp @@ -25,7 +25,7 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.inline.hpp" #include "nmt/memTracker.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "utilities/vmError.hpp" @@ -49,8 +49,8 @@ void ResourceArea::verify_has_resource_mark() { // is missing a ResourceMark, to avoid possible recursive errors // in error handling. 
static volatile bool reported = false; - if (!Atomic::load(&reported)) { - if (!Atomic::cmpxchg(&reported, false, true)) { + if (!AtomicAccess::load(&reported)) { + if (!AtomicAccess::cmpxchg(&reported, false, true)) { fatal("memory leak: allocating without ResourceMark"); } } diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index f34a771138d42..9251fecbb6cc4 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -70,7 +70,7 @@ #include "oops/typeArrayKlass.hpp" #include "prims/resolvedMethodTable.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/cpuTimeCounters.hpp" #include "runtime/flags/jvmFlagLimit.hpp" #include "runtime/handles.inline.hpp" @@ -738,7 +738,7 @@ oop Universe::gen_out_of_memory_error(oop default_err) { int next; if ((_preallocated_out_of_memory_error_avail_count > 0) && vmClasses::Throwable_klass()->is_initialized()) { - next = (int)Atomic::add(&_preallocated_out_of_memory_error_avail_count, -1); + next = (int)AtomicAccess::add(&_preallocated_out_of_memory_error_avail_count, -1); assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt"); } else { next = -1; diff --git a/src/hotspot/share/nmt/mallocSiteTable.cpp b/src/hotspot/share/nmt/mallocSiteTable.cpp index 55fa5f0b173ed..c9ddffce5ecb7 100644 --- a/src/hotspot/share/nmt/mallocSiteTable.cpp +++ b/src/hotspot/share/nmt/mallocSiteTable.cpp @@ -24,7 +24,7 @@ #include "memory/allocation.inline.hpp" #include "nmt/mallocSiteTable.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/permitForbiddenFunctions.hpp" @@ -123,7 +123,7 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* if (entry == nullptr) return nullptr; // swap in the head - if (Atomic::replace_if_null(&_table[index], entry)) { + if (AtomicAccess::replace_if_null(&_table[index], entry)) { *marker = build_marker(index, 0); return entry->data(); } @@ -250,5 +250,5 @@ void MallocSiteTable::print_tuning_statistics(outputStream* st) { } bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) { - return Atomic::replace_if_null(&_next, entry); + return AtomicAccess::replace_if_null(&_next, entry); } diff --git a/src/hotspot/share/nmt/mallocSiteTable.hpp b/src/hotspot/share/nmt/mallocSiteTable.hpp index 472bc397dd49e..e4adff9cc50c5 100644 --- a/src/hotspot/share/nmt/mallocSiteTable.hpp +++ b/src/hotspot/share/nmt/mallocSiteTable.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "nmt/allocationSite.hpp" #include "nmt/mallocTracker.hpp" #include "nmt/nmtCommon.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/macros.hpp" #include "utilities/nativeCallStack.hpp" diff --git a/src/hotspot/share/nmt/mallocTracker.cpp b/src/hotspot/share/nmt/mallocTracker.cpp index ab3cb3221075d..75089dffc3014 100644 --- a/src/hotspot/share/nmt/mallocTracker.cpp +++ b/src/hotspot/share/nmt/mallocTracker.cpp @@ -35,7 +35,7 @@ #include "nmt/mallocTracker.hpp" #include "nmt/memTracker.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "runtime/safefetch.hpp" @@ -50,7 +50,7 @@ MallocMemorySnapshot MallocMemorySummary::_snapshot; void MemoryCounter::update_peak(size_t size, size_t cnt) { size_t peak_sz = peak_size(); while (peak_sz < size) { - size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed); + size_t old_sz = AtomicAccess::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed); if (old_sz == peak_sz) { // I won _peak_count = cnt; diff --git a/src/hotspot/share/nmt/mallocTracker.hpp b/src/hotspot/share/nmt/mallocTracker.hpp index e71c9374d4b58..0ead41f24110f 100644 --- a/src/hotspot/share/nmt/mallocTracker.hpp +++ b/src/hotspot/share/nmt/mallocTracker.hpp @@ -29,7 +29,7 @@ #include "nmt/mallocHeader.hpp" #include "nmt/memTag.hpp" #include "nmt/nmtCommon.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/nativeCallStack.hpp" class outputStream; @@ -61,9 +61,9 @@ class MemoryCounter { } inline void allocate(size_t sz) { - size_t cnt = Atomic::add(&_count, size_t(1), memory_order_relaxed); + size_t cnt = AtomicAccess::add(&_count, size_t(1), memory_order_relaxed); if (sz > 0) { - size_t sum = Atomic::add(&_size, sz, memory_order_relaxed); + size_t sum = AtomicAccess::add(&_size, sz, memory_order_relaxed); update_peak(sum, cnt); } } @@ -71,29 +71,29 @@ class MemoryCounter { inline void deallocate(size_t sz) { assert(count() > 0, "Nothing allocated yet"); assert(size() >= sz, "deallocation > allocated"); - Atomic::dec(&_count, memory_order_relaxed); + AtomicAccess::dec(&_count, memory_order_relaxed); if (sz > 0) { - Atomic::sub(&_size, sz, memory_order_relaxed); + AtomicAccess::sub(&_size, sz, memory_order_relaxed); } } inline void resize(ssize_t sz) { if (sz != 0) { assert(sz >= 0 || size() >= size_t(-sz), "Must be"); - size_t sum = Atomic::add(&_size, size_t(sz), memory_order_relaxed); + size_t sum = AtomicAccess::add(&_size, size_t(sz), memory_order_relaxed); update_peak(sum, _count); } } - inline size_t count() const { return Atomic::load(&_count); } - inline size_t size() const { return Atomic::load(&_size); } + inline size_t count() const { return AtomicAccess::load(&_count); } + inline size_t size() const { return AtomicAccess::load(&_size); } inline size_t peak_count() const { - return Atomic::load(&_peak_count); + return AtomicAccess::load(&_peak_count); } inline size_t peak_size() const { - return Atomic::load(&_peak_size); + return AtomicAccess::load(&_peak_size); } }; diff --git a/src/hotspot/share/nmt/memTracker.cpp b/src/hotspot/share/nmt/memTracker.cpp index ed5d34bc12aab..a07c8bd69ef70 100644 --- a/src/hotspot/share/nmt/memTracker.cpp +++ b/src/hotspot/share/nmt/memTracker.cpp @@ -34,7 +34,7 @@ #include "nmt/nmtCommon.hpp" #include 
"nmt/nmtPreInit.hpp" #include "nmt/threadStackTracker.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/orderAccess.hpp" #include "runtime/vmOperations.hpp" @@ -116,7 +116,7 @@ void MemTracker::final_report(outputStream* output) { // printing the final report during normal VM exit, it should not print // the final report again. In addition, it should be guarded from // recursive calls in case NMT reporting itself crashes. - if (enabled() && Atomic::cmpxchg(&g_final_report_did_run, false, true) == false) { + if (enabled() && AtomicAccess::cmpxchg(&g_final_report_did_run, false, true) == false) { report(tracking_level() == NMT_summary, output, 1); } } diff --git a/src/hotspot/share/nmt/nmtPreInit.hpp b/src/hotspot/share/nmt/nmtPreInit.hpp index 1524c2bd7dc69..d85dde816a342 100644 --- a/src/hotspot/share/nmt/nmtPreInit.hpp +++ b/src/hotspot/share/nmt/nmtPreInit.hpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2022, 2023 SAP SE. All rights reserved. - * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #ifdef ASSERT -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #endif class outputStream; diff --git a/src/hotspot/share/nmt/virtualMemoryTracker.cpp b/src/hotspot/share/nmt/virtualMemoryTracker.cpp index bdb31d5c99296..25a0e0e2863dd 100644 --- a/src/hotspot/share/nmt/virtualMemoryTracker.cpp +++ b/src/hotspot/share/nmt/virtualMemoryTracker.cpp @@ -35,7 +35,7 @@ VirtualMemorySnapshot VirtualMemorySummary::_snapshot; void VirtualMemory::update_peak(size_t size) { size_t peak_sz = peak_size(); while (peak_sz < size) { - size_t old_sz = Atomic::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed); + size_t old_sz = AtomicAccess::cmpxchg(&_peak_size, peak_sz, size, memory_order_relaxed); if (old_sz == peak_sz) { break; } else { diff --git a/src/hotspot/share/nmt/virtualMemoryTracker.hpp b/src/hotspot/share/nmt/virtualMemoryTracker.hpp index 121fcbbda4bdc..956e7e580fdf1 100644 --- a/src/hotspot/share/nmt/virtualMemoryTracker.hpp +++ b/src/hotspot/share/nmt/virtualMemoryTracker.hpp @@ -28,7 +28,7 @@ #include "nmt/allocationSite.hpp" #include "nmt/regionsTree.hpp" #include "nmt/vmatree.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/nativeCallStack.hpp" #include "utilities/ostream.hpp" @@ -82,7 +82,7 @@ class VirtualMemory { inline size_t reserved() const { return _reserved; } inline size_t committed() const { return _committed; } inline size_t peak_size() const { - return Atomic::load(&_peak_size); + return AtomicAccess::load(&_peak_size); } }; diff --git a/src/hotspot/share/oops/accessBackend.inline.hpp b/src/hotspot/share/oops/accessBackend.inline.hpp index e0d4f5aca92a4..a9ea1bda9c4c6 100644 --- a/src/hotspot/share/oops/accessBackend.inline.hpp +++ b/src/hotspot/share/oops/accessBackend.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "oops/arrayOop.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oopsHierarchy.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include @@ -140,7 +140,7 @@ RawAccessBarrier::load_internal(void* addr) { if (support_IRIW_for_not_multiple_copy_atomic_cpu) { OrderAccess::fence(); } - return Atomic::load_acquire(reinterpret_cast(addr)); + return AtomicAccess::load_acquire(reinterpret_cast(addr)); } template @@ -148,7 +148,7 @@ template inline typename EnableIf< HasDecorator::value, T>::type RawAccessBarrier::load_internal(void* addr) { - return Atomic::load_acquire(reinterpret_cast(addr)); + return AtomicAccess::load_acquire(reinterpret_cast(addr)); } template @@ -156,7 +156,7 @@ template inline typename EnableIf< HasDecorator::value, T>::type RawAccessBarrier::load_internal(void* addr) { - return Atomic::load(reinterpret_cast(addr)); + return AtomicAccess::load(reinterpret_cast(addr)); } template @@ -164,7 +164,7 @@ template inline typename EnableIf< HasDecorator::value>::type RawAccessBarrier::store_internal(void* addr, T value) { - Atomic::release_store_fence(reinterpret_cast(addr), value); + AtomicAccess::release_store_fence(reinterpret_cast(addr), value); } template @@ -172,7 +172,7 @@ template inline typename EnableIf< HasDecorator::value>::type RawAccessBarrier::store_internal(void* addr, T value) { - Atomic::release_store(reinterpret_cast(addr), value); + AtomicAccess::release_store(reinterpret_cast(addr), value); } template @@ -180,7 +180,7 @@ template inline typename EnableIf< HasDecorator::value>::type RawAccessBarrier::store_internal(void* addr, T value) { - Atomic::store(reinterpret_cast(addr), value); + AtomicAccess::store(reinterpret_cast(addr), value); } template @@ -188,10 +188,10 @@ template inline typename EnableIf< HasDecorator::value, T>::type RawAccessBarrier::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) { - return Atomic::cmpxchg(reinterpret_cast(addr), - compare_value, - new_value, - memory_order_relaxed); + return AtomicAccess::cmpxchg(reinterpret_cast(addr), + compare_value, + new_value, + memory_order_relaxed); } template @@ -199,10 +199,10 @@ template inline typename EnableIf< HasDecorator::value, T>::type RawAccessBarrier::atomic_cmpxchg_internal(void* addr, T compare_value, T new_value) { - return Atomic::cmpxchg(reinterpret_cast(addr), - compare_value, - new_value, - memory_order_conservative); + return AtomicAccess::cmpxchg(reinterpret_cast(addr), + compare_value, + new_value, + memory_order_conservative); } template @@ -210,8 +210,8 @@ template inline typename EnableIf< HasDecorator::value, T>::type RawAccessBarrier::atomic_xchg_internal(void* addr, T new_value) { - return Atomic::xchg(reinterpret_cast(addr), - new_value); + return AtomicAccess::xchg(reinterpret_cast(addr), + new_value); } class RawAccessBarrierArrayCopy: public AllStatic { diff --git a/src/hotspot/share/oops/array.hpp b/src/hotspot/share/oops/array.hpp index 91a348728d2c6..90dcc9dfeaf67 100644 --- a/src/hotspot/share/oops/array.hpp +++ b/src/hotspot/share/oops/array.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_OOPS_ARRAY_HPP #define SHARE_OOPS_ARRAY_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/align.hpp" #include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" @@ -133,8 +133,8 @@ class Array: public MetaspaceObj { T* adr_at(const int i) { 
assert(i >= 0 && i< _length, "oob: 0 <= %d < %d", i, _length); return &data()[i]; } int find(const T& x) { return index_of(x); } - T at_acquire(const int i) { return Atomic::load_acquire(adr_at(i)); } - void release_at_put(int i, T x) { Atomic::release_store(adr_at(i), x); } + T at_acquire(const int i) { return AtomicAccess::load_acquire(adr_at(i)); } + void release_at_put(int i, T x) { AtomicAccess::release_store(adr_at(i), x); } static int size(int length) { size_t bytes = align_up(byte_sizeof(length), BytesPerWord); diff --git a/src/hotspot/share/oops/arrayKlass.inline.hpp b/src/hotspot/share/oops/arrayKlass.inline.hpp index 21794f82089b4..036b8892ec9a6 100644 --- a/src/hotspot/share/oops/arrayKlass.inline.hpp +++ b/src/hotspot/share/oops/arrayKlass.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,14 +27,14 @@ #include "oops/arrayKlass.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline ObjArrayKlass* ArrayKlass::higher_dimension_acquire() const { - return Atomic::load_acquire(&_higher_dimension); + return AtomicAccess::load_acquire(&_higher_dimension); } inline void ArrayKlass::release_set_higher_dimension(ObjArrayKlass* k) { - Atomic::release_store(&_higher_dimension, k); + AtomicAccess::release_store(&_higher_dimension, k); } #endif // SHARE_OOPS_ARRAYKLASS_INLINE_HPP diff --git a/src/hotspot/share/oops/constMethodFlags.cpp b/src/hotspot/share/oops/constMethodFlags.cpp index 16bf2aab61374..928518c8257a2 100644 --- a/src/hotspot/share/oops/constMethodFlags.cpp +++ b/src/hotspot/share/oops/constMethodFlags.cpp @@ -23,7 +23,7 @@ */ #include "oops/constMethodFlags.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/ostream.hpp" void ConstMethodFlags::print_on(outputStream* st) const { diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp index 735968f7a9587..ddb24302d6943 100644 --- a/src/hotspot/share/oops/constantPool.cpp +++ b/src/hotspot/share/oops/constantPool.cpp @@ -60,7 +60,7 @@ #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" @@ -277,7 +277,7 @@ void ConstantPool::klass_at_put(int class_index, Klass* k) { CPKlassSlot kslot = klass_slot_at(class_index); int resolved_klass_index = kslot.resolved_klass_index(); Klass** adr = resolved_klasses()->adr_at(resolved_klass_index); - Atomic::release_store(adr, k); + AtomicAccess::release_store(adr, k); // The interpreter assumes when the tag is stored, the klass is resolved // and the Klass* non-null, so we need hardware store ordering here. @@ -694,16 +694,16 @@ Klass* ConstantPool::klass_at_impl(const constantPoolHandle& this_cp, int cp_ind // hardware store ordering here. // We also need to CAS to not overwrite an error from a racing thread. 
Klass** adr = this_cp->resolved_klasses()->adr_at(resolved_klass_index); - Atomic::release_store(adr, k); + AtomicAccess::release_store(adr, k); - jbyte old_tag = Atomic::cmpxchg((jbyte*)this_cp->tag_addr_at(cp_index), - (jbyte)JVM_CONSTANT_UnresolvedClass, - (jbyte)JVM_CONSTANT_Class); + jbyte old_tag = AtomicAccess::cmpxchg((jbyte*)this_cp->tag_addr_at(cp_index), + (jbyte)JVM_CONSTANT_UnresolvedClass, + (jbyte)JVM_CONSTANT_Class); // We need to recheck exceptions from racing thread and return the same. if (old_tag == JVM_CONSTANT_UnresolvedClassInError) { // Remove klass. - Atomic::store(adr, (Klass*)nullptr); + AtomicAccess::store(adr, (Klass*)nullptr); throw_resolution_error(this_cp, cp_index, CHECK_NULL); } @@ -1035,9 +1035,9 @@ void ConstantPool::save_and_throw_exception(const constantPoolHandle& this_cp, i // This doesn't deterministically get an error. So why do we save this? // We save this because jvmti can add classes to the bootclass path after // this error, so it needs to get the same error if the error is first. - jbyte old_tag = Atomic::cmpxchg((jbyte*)this_cp->tag_addr_at(cp_index), - (jbyte)tag.value(), - (jbyte)error_tag); + jbyte old_tag = AtomicAccess::cmpxchg((jbyte*)this_cp->tag_addr_at(cp_index), + (jbyte)tag.value(), + (jbyte)error_tag); if (old_tag != error_tag && old_tag != tag.value()) { // MethodHandles and MethodType doesn't change to resolved version. assert(this_cp->tag_at(cp_index).is_klass(), "Wrong tag value"); diff --git a/src/hotspot/share/oops/constantPool.inline.hpp b/src/hotspot/share/oops/constantPool.inline.hpp index 3aed4408121d1..86ebea814d01f 100644 --- a/src/hotspot/share/oops/constantPool.inline.hpp +++ b/src/hotspot/share/oops/constantPool.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "oops/resolvedMethodEntry.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline Klass* ConstantPool::resolved_klass_at(int which) const { // Used by Compiler guarantee(tag_at(which).is_klass(), "Corrupted constant pool"); @@ -41,7 +41,7 @@ inline Klass* ConstantPool::resolved_klass_at(int which) const { // Used by Com assert(tag_at(kslot.name_index()).is_symbol(), "sanity"); Klass** adr = resolved_klasses()->adr_at(kslot.resolved_klass_index()); - return Atomic::load_acquire(adr); + return AtomicAccess::load_acquire(adr); } inline ResolvedFieldEntry* ConstantPool::resolved_field_entry_at(int field_index) { diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp index 8944f85af0ddd..941ceac8de12b 100644 --- a/src/hotspot/share/oops/cpCache.cpp +++ b/src/hotspot/share/oops/cpCache.cpp @@ -53,7 +53,7 @@ #include "oops/resolvedMethodEntry.hpp" #include "prims/methodHandles.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/synchronizer.hpp" diff --git a/src/hotspot/share/oops/cpCache.inline.hpp b/src/hotspot/share/oops/cpCache.inline.hpp index 8dec47a3c73db..dce433bc592a8 100644 --- a/src/hotspot/share/oops/cpCache.inline.hpp +++ b/src/hotspot/share/oops/cpCache.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "oops/resolvedMethodEntry.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" // Constructor inline ConstantPoolCache::ConstantPoolCache(const intStack& invokedynamic_references_map, diff --git a/src/hotspot/share/oops/fieldInfo.cpp b/src/hotspot/share/oops/fieldInfo.cpp index 8c1a9e46d4034..398d2cf5635da 100644 --- a/src/hotspot/share/oops/fieldInfo.cpp +++ b/src/hotspot/share/oops/fieldInfo.cpp @@ -25,7 +25,7 @@ #include "cds/cdsConfig.hpp" #include "memory/resourceArea.hpp" #include "oops/fieldInfo.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/packedTable.hpp" void FieldInfo::print(outputStream* os, ConstantPool* cp) { diff --git a/src/hotspot/share/oops/fieldInfo.inline.hpp b/src/hotspot/share/oops/fieldInfo.inline.hpp index 842393729b2ea..57a1a3607d7d2 100644 --- a/src/hotspot/share/oops/fieldInfo.inline.hpp +++ b/src/hotspot/share/oops/fieldInfo.inline.hpp @@ -30,7 +30,7 @@ #include "memory/metadataFactory.hpp" #include "oops/constantPool.hpp" #include "oops/symbol.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/checkedCast.hpp" inline Symbol* FieldInfo::name(ConstantPool* cp) const { @@ -173,11 +173,11 @@ inline FieldInfoReader& FieldInfoReader::set_position_and_next_index(int positio } inline void FieldStatus::atomic_set_bits(u1& flags, u1 mask) { - Atomic::fetch_then_or(&flags, mask); + AtomicAccess::fetch_then_or(&flags, mask); } inline void FieldStatus::atomic_clear_bits(u1& flags, u1 mask) { - Atomic::fetch_then_and(&flags, (u1)(~mask)); + AtomicAccess::fetch_then_and(&flags, (u1)(~mask)); } inline void FieldStatus::update_flag(FieldStatusBitPosition pos, bool z) { diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp index e0ebb92c7ae34..f5e5628c99a08 100644 --- a/src/hotspot/share/oops/instanceKlass.cpp +++ b/src/hotspot/share/oops/instanceKlass.cpp @@ -78,7 +78,7 @@ #include "prims/jvmtiThreadState.hpp" #include "prims/methodComparator.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" @@ -1413,7 +1413,7 @@ InstanceKlass* InstanceKlass::implementor() const { return nullptr; } else { // This load races with inserts, and therefore needs acquire. - InstanceKlass* ikls = Atomic::load_acquire(ik); + InstanceKlass* ikls = AtomicAccess::load_acquire(ik); if (ikls != nullptr && !ikls->is_loader_alive()) { return nullptr; // don't return unloaded class } else { @@ -1429,7 +1429,7 @@ void InstanceKlass::set_implementor(InstanceKlass* ik) { InstanceKlass* volatile* addr = adr_implementor(); assert(addr != nullptr, "null addr"); if (addr != nullptr) { - Atomic::release_store(addr, ik); + AtomicAccess::release_store(addr, ik); } } @@ -1759,11 +1759,11 @@ void InstanceKlass::mask_for(const methodHandle& method, int bci, InterpreterOopMap* entry_for) { // Lazily create the _oop_map_cache at first request. // Load_acquire is needed to safely get instance published with CAS by another thread. 
- OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache); + OopMapCache* oop_map_cache = AtomicAccess::load_acquire(&_oop_map_cache); if (oop_map_cache == nullptr) { // Try to install new instance atomically. oop_map_cache = new OopMapCache(); - OopMapCache* other = Atomic::cmpxchg(&_oop_map_cache, (OopMapCache*)nullptr, oop_map_cache); + OopMapCache* other = AtomicAccess::cmpxchg(&_oop_map_cache, (OopMapCache*)nullptr, oop_map_cache); if (other != nullptr) { // Someone else managed to install before us, ditch local copy and use the existing one. delete oop_map_cache; @@ -2390,7 +2390,7 @@ jmethodID InstanceKlass::update_jmethod_id(jmethodID* jmeths, Method* method, in assert(method != nullptr, "old and but not obsolete, so should exist"); } jmethodID new_id = Method::make_jmethod_id(class_loader_data(), method); - Atomic::release_store(&jmeths[idnum + 1], new_id); + AtomicAccess::release_store(&jmeths[idnum + 1], new_id); return new_id; } @@ -2405,11 +2405,11 @@ static jmethodID* create_jmethod_id_cache(size_t size) { // When reading outside a lock, use this. jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const { - return Atomic::load_acquire(&_methods_jmethod_ids); + return AtomicAccess::load_acquire(&_methods_jmethod_ids); } void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) { - Atomic::release_store(&_methods_jmethod_ids, jmeths); + AtomicAccess::release_store(&_methods_jmethod_ids, jmeths); } // Lookup or create a jmethodID. @@ -2448,7 +2448,7 @@ jmethodID InstanceKlass::get_jmethod_id(Method* method) { } } - jmethodID id = Atomic::load_acquire(&jmeths[idnum + 1]); + jmethodID id = AtomicAccess::load_acquire(&jmeths[idnum + 1]); if (id == nullptr) { MutexLocker ml(JmethodIdCreation_lock, Mutex::_no_safepoint_check_flag); id = jmeths[idnum + 1]; @@ -2497,11 +2497,11 @@ void InstanceKlass::make_methods_jmethod_ids() { Method* m = methods()->at(index); int idnum = m->method_idnum(); assert(!m->is_old(), "should not have old methods or I'm confused"); - jmethodID id = Atomic::load_acquire(&jmeths[idnum + 1]); + jmethodID id = AtomicAccess::load_acquire(&jmeths[idnum + 1]); if (!m->is_overpass() && // skip overpasses id == nullptr) { id = Method::make_jmethod_id(class_loader_data(), m); - Atomic::release_store(&jmeths[idnum + 1], id); + AtomicAccess::release_store(&jmeths[idnum + 1], id); } } } @@ -2554,10 +2554,10 @@ void InstanceKlass::clean_implementors_list() { // Use load_acquire due to competing with inserts InstanceKlass* volatile* iklass = adr_implementor(); assert(iklass != nullptr, "Klass must not be null"); - InstanceKlass* impl = Atomic::load_acquire(iklass); + InstanceKlass* impl = AtomicAccess::load_acquire(iklass); if (impl != nullptr && !impl->is_loader_alive()) { // null this field, might be an unloaded instance klass or null - if (Atomic::cmpxchg(iklass, impl, (InstanceKlass*)nullptr) == impl) { + if (AtomicAccess::cmpxchg(iklass, impl, (InstanceKlass*)nullptr) == impl) { // Successfully unlinking implementor. 
if (log_is_enabled(Trace, class, unload)) { ResourceMark rm; @@ -4261,7 +4261,7 @@ void InstanceKlass::set_init_state(ClassState state) { assert(good_state || state == allocated, "illegal state transition"); #endif assert(_init_thread == nullptr, "should be cleared before state change"); - Atomic::release_store(&_init_state, state); + AtomicAccess::release_store(&_init_state, state); } #if INCLUDE_JVMTI diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp index dfd134857b8bf..c2d5e9cc09818 100644 --- a/src/hotspot/share/oops/instanceKlass.hpp +++ b/src/hotspot/share/oops/instanceKlass.hpp @@ -506,7 +506,7 @@ class InstanceKlass: public Klass { ClassLoaderData* loader_data, TRAPS); - JavaThread* init_thread() { return Atomic::load(&_init_thread); } + JavaThread* init_thread() { return AtomicAccess::load(&_init_thread); } const char* init_thread_name() { return init_thread()->name_raw(); } @@ -520,7 +520,7 @@ class InstanceKlass: public Klass { bool is_being_initialized() const { return init_state() == being_initialized; } bool is_in_error_state() const { return init_state() == initialization_error; } bool is_reentrant_initialization(Thread *thread) { return thread == _init_thread; } - ClassState init_state() const { return Atomic::load_acquire(&_init_state); } + ClassState init_state() const { return AtomicAccess::load_acquire(&_init_state); } const char* init_state_name() const; bool is_rewritten() const { return _misc_flags.rewritten(); } @@ -1062,7 +1062,7 @@ class InstanceKlass: public Klass { void set_init_thread(JavaThread *thread) { assert((thread == JavaThread::current() && _init_thread == nullptr) || (thread == nullptr && _init_thread == JavaThread::current()), "Only one thread is allowed to own initialization"); - Atomic::store(&_init_thread, thread); + AtomicAccess::store(&_init_thread, thread); } jmethodID* methods_jmethod_ids_acquire() const; diff --git a/src/hotspot/share/oops/instanceKlass.inline.hpp b/src/hotspot/share/oops/instanceKlass.inline.hpp index f9db34f488439..1602e3e87c4db 100644 --- a/src/hotspot/share/oops/instanceKlass.inline.hpp +++ b/src/hotspot/share/oops/instanceKlass.inline.hpp @@ -31,7 +31,7 @@ #include "oops/fieldInfo.inline.hpp" #include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/devirtualizer.inline.hpp" #include "utilities/globalDefinitions.hpp" @@ -64,11 +64,11 @@ inline InstanceKlass* volatile* InstanceKlass::adr_implementor() const { } inline ObjArrayKlass* InstanceKlass::array_klasses_acquire() const { - return Atomic::load_acquire(&_array_klasses); + return AtomicAccess::load_acquire(&_array_klasses); } inline void InstanceKlass::release_set_array_klasses(ObjArrayKlass* k) { - Atomic::release_store(&_array_klasses, k); + AtomicAccess::release_store(&_array_klasses, k); } // The iteration over the oops in objects is a hot path in the GC code. 
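Editor's note: the instanceKlass.cpp hunks above keep their lock-free publication protocol and only swap Atomic for AtomicAccess — the lazy _oop_map_cache install races a freshly built object into a shared slot with cmpxchg and discards the local copy on loss, while readers use load_acquire. For readers new to the pattern, here is a minimal standalone sketch of that race-then-discard installation; it is written against std::atomic because AtomicAccess is HotSpot-internal, and the OopMapCacheLike/lazy_get names are illustrative only, not part of the patch.

#include <atomic>

// Stand-in for the real OopMapCache; present only to keep the sketch self-contained.
struct OopMapCacheLike {};

// Lazily create and publish a shared object exactly once, without a lock.
// Losers of the installation race delete their local copy and adopt the
// instance that the winner published.
OopMapCacheLike* lazy_get(std::atomic<OopMapCacheLike*>& slot) {
  OopMapCacheLike* cache = slot.load(std::memory_order_acquire);  // analogous to load_acquire
  if (cache == nullptr) {
    OopMapCacheLike* fresh = new OopMapCacheLike();
    OopMapCacheLike* expected = nullptr;
    // Analogous to cmpxchg against nullptr; seq_cst here stands in for
    // HotSpot's conservatively fenced default ordering.
    if (slot.compare_exchange_strong(expected, fresh)) {
      cache = fresh;      // won the race: our copy is now the shared instance
    } else {
      delete fresh;       // lost the race: discard ours
      cache = expected;   // use the instance the winner published
    }
  }
  return cache;
}

std::atomic<OopMapCacheLike*> g_cache{nullptr};

int main() {
  OopMapCacheLike* a = lazy_get(g_cache);
  OopMapCacheLike* b = lazy_get(g_cache);
  return (a == b) ? 0 : 1;  // the second call observes the already-published instance
}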
diff --git a/src/hotspot/share/oops/instanceKlassFlags.hpp b/src/hotspot/share/oops/instanceKlassFlags.hpp index 18a9c76103d84..5eebaab27d12b 100644 --- a/src/hotspot/share/oops/instanceKlassFlags.hpp +++ b/src/hotspot/share/oops/instanceKlassFlags.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_OOPS_INSTANCEKLASSFLAGS_HPP #define SHARE_OOPS_INSTANCEKLASSFLAGS_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" class ClassLoaderData; @@ -122,8 +122,8 @@ class InstanceKlassFlags { IK_STATUS_DO(IK_STATUS_GET_SET) #undef IK_STATUS_GET_SET - void atomic_set_bits(u1 bits) { Atomic::fetch_then_or(&_status, bits); } - void atomic_clear_bits(u1 bits) { Atomic::fetch_then_and(&_status, (u1)(~bits)); } + void atomic_set_bits(u1 bits) { AtomicAccess::fetch_then_or(&_status, bits); } + void atomic_clear_bits(u1 bits) { AtomicAccess::fetch_then_and(&_status, (u1)(~bits)); } void print_on(outputStream* st) const; }; diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp index 92bcaebec4a93..b6e60b4fa7d91 100644 --- a/src/hotspot/share/oops/klass.cpp +++ b/src/hotspot/share/oops/klass.cpp @@ -50,7 +50,7 @@ #include "oops/oop.inline.hpp" #include "oops/oopHandle.inline.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/perfData.hpp" #include "utilities/macros.hpp" @@ -617,11 +617,11 @@ GrowableArray* Klass::compute_secondary_supers(int num_extra_slots, Klass* Klass::subklass(bool log) const { // Need load_acquire on the _subklass, because it races with inserts that // publishes freshly initialized data. - for (Klass* chain = Atomic::load_acquire(&_subklass); + for (Klass* chain = AtomicAccess::load_acquire(&_subklass); chain != nullptr; // Do not need load_acquire on _next_sibling, because inserts never // create _next_sibling edges to dead data. - chain = Atomic::load(&chain->_next_sibling)) + chain = AtomicAccess::load(&chain->_next_sibling)) { if (chain->is_loader_alive()) { return chain; @@ -638,9 +638,9 @@ Klass* Klass::subklass(bool log) const { Klass* Klass::next_sibling(bool log) const { // Do not need load_acquire on _next_sibling, because inserts never // create _next_sibling edges to dead data. - for (Klass* chain = Atomic::load(&_next_sibling); + for (Klass* chain = AtomicAccess::load(&_next_sibling); chain != nullptr; - chain = Atomic::load(&chain->_next_sibling)) { + chain = AtomicAccess::load(&chain->_next_sibling)) { // Only return alive klass, there may be stale klass // in this chain if cleaned concurrently. if (chain->is_loader_alive()) { @@ -657,7 +657,7 @@ Klass* Klass::next_sibling(bool log) const { void Klass::set_subklass(Klass* s) { assert(s != this, "sanity check"); - Atomic::release_store(&_subklass, s); + AtomicAccess::release_store(&_subklass, s); } void Klass::set_next_sibling(Klass* s) { @@ -665,7 +665,7 @@ void Klass::set_next_sibling(Klass* s) { // Does not need release semantics. If used by cleanup, it will link to // already safely published data, and if used by inserts, will be published // safely using cmpxchg. 
- Atomic::store(&_next_sibling, s); + AtomicAccess::store(&_next_sibling, s); } void Klass::append_to_sibling_list() { @@ -684,7 +684,7 @@ void Klass::append_to_sibling_list() { super->clean_subklass(); for (;;) { - Klass* prev_first_subklass = Atomic::load_acquire(&_super->_subklass); + Klass* prev_first_subklass = AtomicAccess::load_acquire(&_super->_subklass); if (prev_first_subklass != nullptr) { // set our sibling to be the super' previous first subklass assert(prev_first_subklass->is_loader_alive(), "May not attach not alive klasses"); @@ -693,7 +693,7 @@ void Klass::append_to_sibling_list() { // Note that the prev_first_subklass is always alive, meaning no sibling_next links // are ever created to not alive klasses. This is an important invariant of the lock-free // cleaning protocol, that allows us to safely unlink dead klasses from the sibling list. - if (Atomic::cmpxchg(&super->_subklass, prev_first_subklass, this) == prev_first_subklass) { + if (AtomicAccess::cmpxchg(&super->_subklass, prev_first_subklass, this) == prev_first_subklass) { return; } } @@ -703,12 +703,12 @@ void Klass::append_to_sibling_list() { void Klass::clean_subklass() { for (;;) { // Need load_acquire, due to contending with concurrent inserts - Klass* subklass = Atomic::load_acquire(&_subklass); + Klass* subklass = AtomicAccess::load_acquire(&_subklass); if (subklass == nullptr || subklass->is_loader_alive()) { return; } // Try to fix _subklass until it points at something not dead. - Atomic::cmpxchg(&_subklass, subklass, subklass->next_sibling()); + AtomicAccess::cmpxchg(&_subklass, subklass, subklass->next_sibling()); } } diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp index 69203189bb7d3..c09a6c1448565 100644 --- a/src/hotspot/share/oops/method.cpp +++ b/src/hotspot/share/oops/method.cpp @@ -65,7 +65,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuationEntry.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" @@ -633,7 +633,7 @@ bool Method::init_training_data(MethodTrainingData* td) { bool Method::install_training_method_data(const methodHandle& method) { MethodTrainingData* mtd = MethodTrainingData::find(method); if (mtd != nullptr && mtd->final_profile() != nullptr) { - Atomic::replace_if_null(&method->_method_data, mtd->final_profile()); + AtomicAccess::replace_if_null(&method->_method_data, mtd->final_profile()); return true; } return false; @@ -660,7 +660,7 @@ void Method::build_profiling_method_data(const methodHandle& method, TRAPS) { return; // return the exception (which is cleared) } - if (!Atomic::replace_if_null(&method->_method_data, method_data)) { + if (!AtomicAccess::replace_if_null(&method->_method_data, method_data)) { MetadataFactory::free_metadata(loader_data, method_data); return; } @@ -711,7 +711,7 @@ MethodCounters* Method::build_method_counters(Thread* current, Method* m) { bool Method::init_method_counters(MethodCounters* counters) { // Try to install a pointer to MethodCounters, return true on success. - return Atomic::replace_if_null(&_method_counters, counters); + return AtomicAccess::replace_if_null(&_method_counters, counters); } void Method::set_exception_handler_entered(int handler_bci) { @@ -1349,7 +1349,7 @@ address Method::verified_code_entry() { // Not inline to avoid circular ref. bool Method::check_code() const { // cached in a register or local. 
There's a race on the value of the field. - nmethod *code = Atomic::load_acquire(&_code); + nmethod *code = AtomicAccess::load_acquire(&_code); return code == nullptr || (code->method() == nullptr) || (code->method() == (Method*)this && !code->is_osr_method()); } @@ -1389,7 +1389,7 @@ void Method::set_code(const methodHandle& mh, nmethod *code) { guarantee(false, "Unknown Continuation native intrinsic"); } // This must come last, as it is what's tested in LinkResolver::resolve_static_call - Atomic::release_store(&mh->_from_interpreted_entry , mh->get_i2c_entry()); + AtomicAccess::release_store(&mh->_from_interpreted_entry , mh->get_i2c_entry()); } else if (!mh->is_method_handle_intrinsic()) { // Instantly compiled code can execute. mh->_from_interpreted_entry = mh->get_i2c_entry(); diff --git a/src/hotspot/share/oops/method.inline.hpp b/src/hotspot/share/oops/method.inline.hpp index 18fca354b6b66..95398643ca178 100644 --- a/src/hotspot/share/oops/method.inline.hpp +++ b/src/hotspot/share/oops/method.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,19 +31,19 @@ #include "code/nmethod.inline.hpp" #include "oops/methodCounters.hpp" #include "oops/methodData.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline address Method::from_compiled_entry() const { - return Atomic::load_acquire(&_from_compiled_entry); + return AtomicAccess::load_acquire(&_from_compiled_entry); } inline address Method::from_interpreted_entry() const { - return Atomic::load_acquire(&_from_interpreted_entry); + return AtomicAccess::load_acquire(&_from_interpreted_entry); } inline nmethod* Method::code() const { assert( check_code(), "" ); - return Atomic::load_acquire(&_code); + return AtomicAccess::load_acquire(&_code); } // Write (bci, line number) pair to stream diff --git a/src/hotspot/share/oops/methodCounters.hpp b/src/hotspot/share/oops/methodCounters.hpp index df8acefc3eb97..5733e6e61f096 100644 --- a/src/hotspot/share/oops/methodCounters.hpp +++ b/src/hotspot/share/oops/methodCounters.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -160,7 +160,7 @@ class MethodCounters : public Metadata { return true; } if (cur == nullptr || cur == reinterpret_cast(method_training_data_sentinel())) { - return Atomic::cmpxchg(reinterpret_cast(&_method_training_data), cur, td) == cur; + return AtomicAccess::cmpxchg(reinterpret_cast(&_method_training_data), cur, td) == cur; } return false; } diff --git a/src/hotspot/share/oops/methodData.cpp b/src/hotspot/share/oops/methodData.cpp index 0463d8d9a81e7..38bdc33c6281c 100644 --- a/src/hotspot/share/oops/methodData.cpp +++ b/src/hotspot/share/oops/methodData.cpp @@ -38,7 +38,7 @@ #include "oops/method.inline.hpp" #include "oops/methodData.inline.hpp" #include "prims/jvmtiRedefineClasses.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" #include "runtime/orderAccess.hpp" @@ -912,7 +912,7 @@ bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** } guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned"); } - FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs); + FailedSpeculation* old_fs = AtomicAccess::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs); if (old_fs == nullptr) { // Successfully appended fs to end of the list return true; @@ -1511,7 +1511,7 @@ ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& for (;; dp = next_extra(dp)) { assert(dp < end, "moved past end of extra data"); - // No need for "Atomic::load_acquire" ops, + // No need for "AtomicAccess::load_acquire" ops, // since the data structure is monotonic. switch(dp->tag()) { case DataLayout::no_tag: @@ -1638,7 +1638,7 @@ void MethodData::print_data_on(outputStream* st) const { DataLayout* end = args_data_limit(); for (;; dp = next_extra(dp)) { assert(dp < end, "moved past end of extra data"); - // No need for "Atomic::load_acquire" ops, + // No need for "AtomicAccess::load_acquire" ops, // since the data structure is monotonic. switch(dp->tag()) { case DataLayout::no_tag: @@ -1859,11 +1859,11 @@ class CleanExtraDataMethodClosure : public CleanExtraDataClosure { }; Mutex* MethodData::extra_data_lock() { - Mutex* lock = Atomic::load_acquire(&_extra_data_lock); + Mutex* lock = AtomicAccess::load_acquire(&_extra_data_lock); if (lock == nullptr) { // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock"); - Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock); + Mutex* old = AtomicAccess::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock); if (old != nullptr) { // Another thread created the lock before us. Use that lock instead. delete lock; diff --git a/src/hotspot/share/oops/methodData.hpp b/src/hotspot/share/oops/methodData.hpp index 61137d9fb7ac1..f9f36c8ad156d 100644 --- a/src/hotspot/share/oops/methodData.hpp +++ b/src/hotspot/share/oops/methodData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ #include "interpreter/invocationCounter.hpp" #include "oops/metadata.hpp" #include "oops/method.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/mutex.hpp" #include "utilities/align.hpp" @@ -181,7 +181,7 @@ class DataLayout { } u1 flags() const { - return Atomic::load_acquire(&_header._struct._flags); + return AtomicAccess::load_acquire(&_header._struct._flags); } u2 bci() const { @@ -214,7 +214,7 @@ class DataLayout { // already set. return false; } - } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, static_cast(compare_value | bit))); + } while (compare_value != AtomicAccess::cmpxchg(&_header._struct._flags, compare_value, static_cast(compare_value | bit))); return true; } @@ -229,7 +229,7 @@ class DataLayout { return false; } exchange_value = compare_value & ~bit; - } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, exchange_value)); + } while (compare_value != AtomicAccess::cmpxchg(&_header._struct._flags, compare_value, exchange_value)); return true; } diff --git a/src/hotspot/share/oops/methodData.inline.hpp b/src/hotspot/share/oops/methodData.inline.hpp index a59271431b573..dee14d492536d 100644 --- a/src/hotspot/share/oops/methodData.inline.hpp +++ b/src/hotspot/share/oops/methodData.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,11 +27,11 @@ #include "oops/methodData.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" inline void DataLayout::release_set_cell_at(int index, intptr_t value) { - Atomic::release_store(&_cells[index], value); + AtomicAccess::release_store(&_cells[index], value); } inline void ProfileData::release_set_intptr_at(int index, intptr_t value) { diff --git a/src/hotspot/share/oops/methodFlags.hpp b/src/hotspot/share/oops/methodFlags.hpp index 2ae1e002b653d..8f291f3501e06 100644 --- a/src/hotspot/share/oops/methodFlags.hpp +++ b/src/hotspot/share/oops/methodFlags.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_OOPS_METHODFLAGS_HPP #define SHARE_OOPS_METHODFLAGS_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -87,8 +87,8 @@ class MethodFlags { #undef M_STATUS_GET_SET int as_int() const { return _status; } - void atomic_set_bits(u4 bits) { Atomic::fetch_then_or(&_status, bits); } - void atomic_clear_bits(u4 bits) { Atomic::fetch_then_and(&_status, ~bits); } + void atomic_set_bits(u4 bits) { AtomicAccess::fetch_then_or(&_status, bits); } + void atomic_clear_bits(u4 bits) { AtomicAccess::fetch_then_and(&_status, ~bits); } void print_on(outputStream* st) const; }; diff --git a/src/hotspot/share/oops/oop.cpp b/src/hotspot/share/oops/oop.cpp index be6e16855bda2..51480c68c22c6 100644 --- a/src/hotspot/share/oops/oop.cpp +++ b/src/hotspot/share/oops/oop.cpp @@ -164,37 +164,37 @@ void oopDesc::release_obj_field_put(int offset, oop value) { HeapAcce void oopDesc::obj_field_put_volatile(int offset, oop value) { HeapAccess::oop_store_at(as_oop(), offset, value); } address oopDesc::address_field(int offset) const { return *field_addr
<address>(offset); } -address oopDesc::address_field_acquire(int offset) const { return Atomic::load_acquire(field_addr<address>
(offset)); } +address oopDesc::address_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr<address>
(offset)); } void oopDesc::address_field_put(int offset, address value) { *field_addr<address>
(offset) = value; } -void oopDesc::release_address_field_put(int offset, address value) { Atomic::release_store(field_addr<address>
(offset), value); } +void oopDesc::release_address_field_put(int offset, address value) { AtomicAccess::release_store(field_addr<address>
(offset), value); } Metadata* oopDesc::metadata_field(int offset) const { return *field_addr(offset); } void oopDesc::metadata_field_put(int offset, Metadata* value) { *field_addr(offset) = value; } -Metadata* oopDesc::metadata_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_metadata_field_put(int offset, Metadata* value) { Atomic::release_store(field_addr(offset), value); } +Metadata* oopDesc::metadata_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_metadata_field_put(int offset, Metadata* value) { AtomicAccess::release_store(field_addr(offset), value); } -jbyte oopDesc::byte_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_byte_field_put(int offset, jbyte value) { Atomic::release_store(field_addr(offset), value); } +jbyte oopDesc::byte_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_byte_field_put(int offset, jbyte value) { AtomicAccess::release_store(field_addr(offset), value); } -jchar oopDesc::char_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_char_field_put(int offset, jchar value) { Atomic::release_store(field_addr(offset), value); } +jchar oopDesc::char_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_char_field_put(int offset, jchar value) { AtomicAccess::release_store(field_addr(offset), value); } -jboolean oopDesc::bool_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_bool_field_put(int offset, jboolean value) { Atomic::release_store(field_addr(offset), jboolean(value & 1)); } +jboolean oopDesc::bool_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_bool_field_put(int offset, jboolean value) { AtomicAccess::release_store(field_addr(offset), jboolean(value & 1)); } -jint oopDesc::int_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_int_field_put(int offset, jint value) { Atomic::release_store(field_addr(offset), value); } +jint oopDesc::int_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_int_field_put(int offset, jint value) { AtomicAccess::release_store(field_addr(offset), value); } -jshort oopDesc::short_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_short_field_put(int offset, jshort value) { Atomic::release_store(field_addr(offset), value); } +jshort oopDesc::short_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_short_field_put(int offset, jshort value) { AtomicAccess::release_store(field_addr(offset), value); } -jlong oopDesc::long_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_long_field_put(int offset, jlong value) { Atomic::release_store(field_addr(offset), value); } +jlong oopDesc::long_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_long_field_put(int offset, jlong value) { AtomicAccess::release_store(field_addr(offset), value); } -jfloat oopDesc::float_field_acquire(int offset) const { return 
Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_float_field_put(int offset, jfloat value) { Atomic::release_store(field_addr(offset), value); } +jfloat oopDesc::float_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_float_field_put(int offset, jfloat value) { AtomicAccess::release_store(field_addr(offset), value); } -jdouble oopDesc::double_field_acquire(int offset) const { return Atomic::load_acquire(field_addr(offset)); } -void oopDesc::release_double_field_put(int offset, jdouble value) { Atomic::release_store(field_addr(offset), value); } +jdouble oopDesc::double_field_acquire(int offset) const { return AtomicAccess::load_acquire(field_addr(offset)); } +void oopDesc::release_double_field_put(int offset, jdouble value) { AtomicAccess::release_store(field_addr(offset), value); } diff --git a/src/hotspot/share/oops/oop.hpp b/src/hotspot/share/oops/oop.hpp index a1c1a64b0508c..cb0e9ea07e088 100644 --- a/src/hotspot/share/oops/oop.hpp +++ b/src/hotspot/share/oops/oop.hpp @@ -32,7 +32,7 @@ #include "oops/markWord.hpp" #include "oops/metadata.hpp" #include "oops/objLayout.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" diff --git a/src/hotspot/share/oops/oop.inline.hpp b/src/hotspot/share/oops/oop.inline.hpp index 16c444a43f843..b445eae933b30 100644 --- a/src/hotspot/share/oops/oop.inline.hpp +++ b/src/hotspot/share/oops/oop.inline.hpp @@ -37,7 +37,7 @@ #include "oops/markWord.inline.hpp" #include "oops/objLayout.inline.hpp" #include "oops/oopsHierarchy.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" @@ -51,15 +51,15 @@ void* oopDesc::base_addr() { return this; } const void* oopDesc::base_addr() const { return this; } markWord oopDesc::mark() const { - return Atomic::load(&_mark); + return AtomicAccess::load(&_mark); } markWord oopDesc::mark_acquire() const { - return Atomic::load_acquire(&_mark); + return AtomicAccess::load_acquire(&_mark); } void oopDesc::set_mark(markWord m) { - Atomic::store(&_mark, m); + AtomicAccess::store(&_mark, m); } void oopDesc::set_mark(HeapWord* mem, markWord m) { @@ -67,19 +67,19 @@ void oopDesc::set_mark(HeapWord* mem, markWord m) { } void oopDesc::release_set_mark(HeapWord* mem, markWord m) { - Atomic::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m); + AtomicAccess::release_store((markWord*)(((char*)mem) + mark_offset_in_bytes()), m); } void oopDesc::release_set_mark(markWord m) { - Atomic::release_store(&_mark, m); + AtomicAccess::release_store(&_mark, m); } markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark) { - return Atomic::cmpxchg(&_mark, old_mark, new_mark); + return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark); } markWord oopDesc::cas_set_mark(markWord new_mark, markWord old_mark, atomic_memory_order order) { - return Atomic::cmpxchg(&_mark, old_mark, new_mark, order); + return AtomicAccess::cmpxchg(&_mark, old_mark, new_mark, order); } markWord oopDesc::prototype_mark() const { @@ -121,11 +121,11 @@ Klass* oopDesc::klass_or_null_acquire() const { case ObjLayout::Compact: return mark_acquire().klass(); case ObjLayout::Compressed: { - narrowKlass narrow_klass = Atomic::load_acquire(&_metadata._compressed_klass); + narrowKlass narrow_klass = AtomicAccess::load_acquire(&_metadata._compressed_klass); return 
CompressedKlassPointers::decode(narrow_klass); } default: - return Atomic::load_acquire(&_metadata._klass); + return AtomicAccess::load_acquire(&_metadata._klass); } } @@ -166,10 +166,10 @@ void oopDesc::release_set_klass(HeapWord* mem, Klass* k) { assert(!UseCompactObjectHeaders, "don't set Klass* with compact headers"); char* raw_mem = ((char*)mem + klass_offset_in_bytes()); if (UseCompressedClassPointers) { - Atomic::release_store((narrowKlass*)raw_mem, + AtomicAccess::release_store((narrowKlass*)raw_mem, CompressedKlassPointers::encode_not_null(k)); } else { - Atomic::release_store((Klass**)raw_mem, k); + AtomicAccess::release_store((Klass**)raw_mem, k); } } @@ -271,8 +271,8 @@ inline void oopDesc::short_field_put(int offset, jshort value) { *field_add inline jint oopDesc::int_field(int offset) const { return *field_addr(offset); } inline void oopDesc::int_field_put(int offset, jint value) { *field_addr(offset) = value; } -inline jint oopDesc::int_field_relaxed(int offset) const { return Atomic::load(field_addr(offset)); } -inline void oopDesc::int_field_put_relaxed(int offset, jint value) { Atomic::store(field_addr(offset), value); } +inline jint oopDesc::int_field_relaxed(int offset) const { return AtomicAccess::load(field_addr(offset)); } +inline void oopDesc::int_field_put_relaxed(int offset, jint value) { AtomicAccess::store(field_addr(offset), value); } inline jlong oopDesc::long_field(int offset) const { return *field_addr(offset); } inline void oopDesc::long_field_put(int offset, jlong value) { *field_addr(offset) = value; } diff --git a/src/hotspot/share/oops/resolvedFieldEntry.hpp b/src/hotspot/share/oops/resolvedFieldEntry.hpp index 1e89d10ab0c0d..1df4ae8d956bc 100644 --- a/src/hotspot/share/oops/resolvedFieldEntry.hpp +++ b/src/hotspot/share/oops/resolvedFieldEntry.hpp @@ -27,7 +27,7 @@ #include "interpreter/bytecodes.hpp" #include "oops/instanceKlass.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/checkedCast.hpp" #include "utilities/sizes.hpp" @@ -130,8 +130,8 @@ class ResolvedFieldEntry { u2 field_index() const { return _field_index; } u2 constant_pool_index() const { return _cpool_index; } u1 tos_state() const { return _tos_state; } - u1 get_code() const { return Atomic::load_acquire(&_get_code); } - u1 put_code() const { return Atomic::load_acquire(&_put_code); } + u1 get_code() const { return AtomicAccess::load_acquire(&_get_code); } + u1 put_code() const { return AtomicAccess::load_acquire(&_put_code); } bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } bool is_volatile () const { return (_flags & (1 << is_volatile_shift)) != 0; } bool is_resolved(Bytecodes::Code code) const { @@ -164,7 +164,7 @@ class ResolvedFieldEntry { volatile Bytecodes::Code c = (Bytecodes::Code)*code; assert(c == 0 || c == new_code || new_code == 0, "update must be consistent"); #endif - Atomic::release_store(code, new_code); + AtomicAccess::release_store(code, new_code); } // Populate the strucutre with resolution information diff --git a/src/hotspot/share/oops/resolvedIndyEntry.hpp b/src/hotspot/share/oops/resolvedIndyEntry.hpp index af0efbadc9f76..04f46a42e8cb6 100644 --- a/src/hotspot/share/oops/resolvedIndyEntry.hpp +++ b/src/hotspot/share/oops/resolvedIndyEntry.hpp @@ -77,7 +77,7 @@ class ResolvedIndyEntry { }; // Getters - Method* method() const { return Atomic::load_acquire(&_method); } + Method* method() const { return AtomicAccess::load_acquire(&_method); } u2 resolved_references_index() const { return 
_resolved_references_index; } u2 constant_pool_index() const { return _cpool_index; } u2 num_parameters() const { return _number_of_parameters; } @@ -101,7 +101,7 @@ class ResolvedIndyEntry { void set_num_parameters(int value) { assert(_number_of_parameters == 0 || _number_of_parameters == value, "size must not change: parameter_size=%d, value=%d", _number_of_parameters, value); - Atomic::store(&_number_of_parameters, (u2)value); + AtomicAccess::store(&_number_of_parameters, (u2)value); guarantee(_number_of_parameters == value, "size must not change: parameter_size=%d, value=%d", _number_of_parameters, value); } @@ -113,7 +113,7 @@ class ResolvedIndyEntry { set_has_appendix(has_appendix); // Set the method last since it is read lock free. // Resolution is indicated by whether or not the method is set. - Atomic::release_store(&_method, m); + AtomicAccess::release_store(&_method, m); } void set_has_appendix(bool has_appendix) { diff --git a/src/hotspot/share/oops/resolvedMethodEntry.hpp b/src/hotspot/share/oops/resolvedMethodEntry.hpp index 097f7de8a56ec..c95efb751e961 100644 --- a/src/hotspot/share/oops/resolvedMethodEntry.hpp +++ b/src/hotspot/share/oops/resolvedMethodEntry.hpp @@ -26,7 +26,7 @@ #define SHARE_OOPS_RESOLVEDMETHODENTRY_HPP #include "interpreter/bytecodes.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/sizes.hpp" // ResolvedMethodEntry contains the resolution information for the invoke bytecodes @@ -145,7 +145,7 @@ class ResolvedMethodEntry { bool has_resolved_references_index() const { return (_flags & (1 << has_resolved_ref_shift)) != 0; } // Getters - Method* method() const { return Atomic::load_acquire(&_method); } + Method* method() const { return AtomicAccess::load_acquire(&_method); } InstanceKlass* interface_klass() const { assert(_bytecode1 == Bytecodes::_invokeinterface, "Only invokeinterface has a klass %d", _bytecode1); assert(_has_interface_klass, "sanity"); @@ -164,8 +164,8 @@ class ResolvedMethodEntry { u2 constant_pool_index() const { return _cpool_index; } u1 tos_state() const { return _tos_state; } u2 number_of_parameters() const { return _number_of_parameters; } - u1 bytecode1() const { return Atomic::load_acquire(&_bytecode1); } - u1 bytecode2() const { return Atomic::load_acquire(&_bytecode2); } + u1 bytecode1() const { return AtomicAccess::load_acquire(&_bytecode1); } + u1 bytecode2() const { return AtomicAccess::load_acquire(&_bytecode2); } bool is_resolved(Bytecodes::Code code) const { switch(code) { @@ -200,7 +200,7 @@ class ResolvedMethodEntry { volatile Bytecodes::Code c = (Bytecodes::Code)*code; assert(c == 0 || c == new_code || new_code == 0, "update must be consistent old: %d, new: %d", c, new_code); #endif - Atomic::release_store(code, new_code); + AtomicAccess::release_store(code, new_code); } void set_bytecode1(u1 b1) { @@ -212,7 +212,7 @@ class ResolvedMethodEntry { } void set_method(Method* m) { - Atomic::release_store(&_method, m); + AtomicAccess::release_store(&_method, m); } void set_klass(InstanceKlass* klass) { diff --git a/src/hotspot/share/oops/symbol.cpp b/src/hotspot/share/oops/symbol.cpp index 3a24a78936b5f..65a5015350c4b 100644 --- a/src/hotspot/share/oops/symbol.cpp +++ b/src/hotspot/share/oops/symbol.cpp @@ -33,7 +33,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/symbol.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "runtime/signature.hpp" @@ -295,7 
+295,7 @@ bool Symbol::try_increment_refcount() { } else if (refc == 0) { return false; // dead, can't revive. } else { - found = Atomic::cmpxchg(&_hash_and_refcount, old_value, old_value + 1); + found = AtomicAccess::cmpxchg(&_hash_and_refcount, old_value, old_value + 1); if (found == old_value) { return true; // successfully updated. } @@ -314,7 +314,7 @@ void Symbol::increment_refcount() { } #ifndef PRODUCT if (refcount() != PERM_REFCOUNT) { // not a permanent symbol - NOT_PRODUCT(Atomic::inc(&_total_count);) + NOT_PRODUCT(AtomicAccess::inc(&_total_count);) } #endif } @@ -334,7 +334,7 @@ void Symbol::decrement_refcount() { fatal("refcount underflow"); return; } else { - found = Atomic::cmpxchg(&_hash_and_refcount, old_value, old_value - 1); + found = AtomicAccess::cmpxchg(&_hash_and_refcount, old_value, old_value - 1); if (found == old_value) { return; // successfully updated. } @@ -356,7 +356,7 @@ void Symbol::make_permanent() { return; } else { short hash = extract_hash(old_value); - found = Atomic::cmpxchg(&_hash_and_refcount, old_value, pack_hash_and_refcount(hash, PERM_REFCOUNT)); + found = AtomicAccess::cmpxchg(&_hash_and_refcount, old_value, pack_hash_and_refcount(hash, PERM_REFCOUNT)); if (found == old_value) { return; // successfully updated. } diff --git a/src/hotspot/share/oops/symbolHandle.cpp b/src/hotspot/share/oops/symbolHandle.cpp index 0ed441ebb0feb..359ef88af1b8a 100644 --- a/src/hotspot/share/oops/symbolHandle.cpp +++ b/src/hotspot/share/oops/symbolHandle.cpp @@ -22,7 +22,7 @@ */ #include "oops/symbolHandle.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" Symbol* volatile TempSymbolCleanupDelayer::_queue[QueueSize] = {}; volatile uint TempSymbolCleanupDelayer::_index = 0; @@ -33,14 +33,14 @@ volatile uint TempSymbolCleanupDelayer::_index = 0; void TempSymbolCleanupDelayer::delay_cleanup(Symbol* sym) { assert(sym != nullptr, "precondition"); sym->increment_refcount(); - uint i = Atomic::add(&_index, 1u) % QueueSize; - Symbol* old = Atomic::xchg(&_queue[i], sym); + uint i = AtomicAccess::add(&_index, 1u) % QueueSize; + Symbol* old = AtomicAccess::xchg(&_queue[i], sym); Symbol::maybe_decrement_refcount(old); } void TempSymbolCleanupDelayer::drain_queue() { for (uint i = 0; i < QueueSize; i++) { - Symbol* sym = Atomic::xchg(&_queue[i], (Symbol*) nullptr); + Symbol* sym = AtomicAccess::xchg(&_queue[i], (Symbol*) nullptr); Symbol::maybe_decrement_refcount(sym); } } diff --git a/src/hotspot/share/oops/trainingData.cpp b/src/hotspot/share/oops/trainingData.cpp index 8f906ae3d3785..c768d13fe591f 100644 --- a/src/hotspot/share/oops/trainingData.cpp +++ b/src/hotspot/share/oops/trainingData.cpp @@ -250,7 +250,7 @@ void CompileTrainingData::dec_init_deps_left_release(KlassTrainingData* ktd) { assert(_init_deps.contains(ktd), ""); assert(_init_deps_left > 0, ""); - uint init_deps_left1 = Atomic::sub(&_init_deps_left, 1); + uint init_deps_left1 = AtomicAccess::sub(&_init_deps_left, 1); if (log.is_enabled()) { uint init_deps_left2 = compute_init_deps_left(); diff --git a/src/hotspot/share/oops/trainingData.hpp b/src/hotspot/share/oops/trainingData.hpp index b909be1232435..9c645d437dc13 100644 --- a/src/hotspot/share/oops/trainingData.hpp +++ b/src/hotspot/share/oops/trainingData.hpp @@ -675,7 +675,7 @@ class CompileTrainingData : public TrainingData { } void dec_init_deps_left_release(KlassTrainingData* ktd); int init_deps_left_acquire() const { - return Atomic::load_acquire(&_init_deps_left); + return AtomicAccess::load_acquire(&_init_deps_left); } uint 
compute_init_deps_left(bool count_initialized = false); diff --git a/src/hotspot/share/oops/typeArrayOop.inline.hpp b/src/hotspot/share/oops/typeArrayOop.inline.hpp index a5a1e3200d18e..c431e3db16d69 100644 --- a/src/hotspot/share/oops/typeArrayOop.inline.hpp +++ b/src/hotspot/share/oops/typeArrayOop.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -156,10 +156,10 @@ inline void typeArrayOopDesc::double_at_put(int which, jdouble contents) { } inline jbyte typeArrayOopDesc::byte_at_acquire(int which) const { - return Atomic::load_acquire(byte_at_addr(which)); + return AtomicAccess::load_acquire(byte_at_addr(which)); } inline void typeArrayOopDesc::release_byte_at_put(int which, jbyte contents) { - Atomic::release_store(byte_at_addr(which), contents); + AtomicAccess::release_store(byte_at_addr(which), contents); } // Java thinks Symbol arrays are just arrays of either long or int, since diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp index 1a5bddd332ead..eff482350b68a 100644 --- a/src/hotspot/share/opto/escape.cpp +++ b/src/hotspot/share/opto/escape.cpp @@ -5067,7 +5067,7 @@ void ConnectionGraph::dump(GrowableArray& ptnodes_worklist) { } void ConnectionGraph::print_statistics() { - tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter)); + tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", AtomicAccess::load(&_no_escape_counter), AtomicAccess::load(&_arg_escape_counter), AtomicAccess::load(&_global_escape_counter)); } void ConnectionGraph::escape_state_statistics(GrowableArray& java_objects_worklist) { @@ -5078,11 +5078,11 @@ void ConnectionGraph::escape_state_statistics(GrowableArray& ja JavaObjectNode* ptn = java_objects_worklist.at(next); if (ptn->ideal_node()->is_Allocate()) { if (ptn->escape_state() == PointsToNode::NoEscape) { - Atomic::inc(&ConnectionGraph::_no_escape_counter); + AtomicAccess::inc(&ConnectionGraph::_no_escape_counter); } else if (ptn->escape_state() == PointsToNode::ArgEscape) { - Atomic::inc(&ConnectionGraph::_arg_escape_counter); + AtomicAccess::inc(&ConnectionGraph::_arg_escape_counter); } else if (ptn->escape_state() == PointsToNode::GlobalEscape) { - Atomic::inc(&ConnectionGraph::_global_escape_counter); + AtomicAccess::inc(&ConnectionGraph::_global_escape_counter); } else { assert(false, "Unexpected Escape State"); } diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index 433dacc0ee1d2..ee3a3d3ba47bd 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -3638,16 +3638,16 @@ bool LibraryCallKit::inline_native_getEventWriter() { * if (carrierThread != thread) { // is virtual thread * const u2 vthread_epoch_raw = java_lang_Thread::jfr_epoch(thread); * bool excluded = vthread_epoch_raw & excluded_mask; - * Atomic::store(&tl->_contextual_tid, java_lang_Thread::tid(thread)); - * Atomic::store(&tl->_contextual_thread_excluded, is_excluded); + * AtomicAccess::store(&tl->_contextual_tid, java_lang_Thread::tid(thread)); + * AtomicAccess::store(&tl->_contextual_thread_excluded, is_excluded); * if (!excluded) { * const u2 
vthread_epoch = vthread_epoch_raw & epoch_mask; - * Atomic::store(&tl->_vthread_epoch, vthread_epoch); + * AtomicAccess::store(&tl->_vthread_epoch, vthread_epoch); * } - * Atomic::release_store(&tl->_vthread, true); + * AtomicAccess::release_store(&tl->_vthread, true); * return; * } - * Atomic::release_store(&tl->_vthread, false); + * AtomicAccess::release_store(&tl->_vthread, false); */ void LibraryCallKit::extend_setCurrentThread(Node* jt, Node* thread) { enum { _true_path = 1, _false_path = 2, PATH_LIMIT }; diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp index da2252f3d5349..a3e3be66583b7 100644 --- a/src/hotspot/share/opto/loopnode.cpp +++ b/src/hotspot/share/opto/loopnode.cpp @@ -830,7 +830,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { #ifndef PRODUCT if (bt == T_LONG) { - Atomic::inc(&_long_loop_candidates); + AtomicAccess::inc(&_long_loop_candidates); } #endif @@ -1148,7 +1148,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { #ifndef PRODUCT if (bt == T_LONG) { - Atomic::inc(&_long_loop_nests); + AtomicAccess::inc(&_long_loop_nests); } #endif @@ -2585,7 +2585,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_ #ifndef PRODUCT if (x->as_Loop()->is_loop_nest_inner_loop() && iv_bt == T_LONG) { - Atomic::inc(&_long_loop_counted_loops); + AtomicAccess::inc(&_long_loop_counted_loops); } #endif if (iv_bt == T_LONG && x->as_Loop()->is_loop_nest_outer_loop()) { diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp index a0b52358bac8e..88cb59c115fc2 100644 --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -147,7 +147,7 @@ void PhaseMacroExpand::eliminate_gc_barrier(Node* p2x) { bs->eliminate_gc_barrier(this, p2x); #ifndef PRODUCT if (PrintOptoStatistics) { - Atomic::inc(&PhaseMacroExpand::_GC_barriers_removed_counter); + AtomicAccess::inc(&PhaseMacroExpand::_GC_barriers_removed_counter); } #endif } @@ -2391,7 +2391,7 @@ void PhaseMacroExpand::eliminate_macro_nodes() { success = eliminate_locking_node(n->as_AbstractLock()); #ifndef PRODUCT if (success && PrintOptoStatistics) { - Atomic::inc(&PhaseMacroExpand::_monitor_objects_removed_counter); + AtomicAccess::inc(&PhaseMacroExpand::_monitor_objects_removed_counter); } #endif } @@ -2416,7 +2416,7 @@ void PhaseMacroExpand::eliminate_macro_nodes() { success = eliminate_allocate_node(n->as_Allocate()); #ifndef PRODUCT if (success && PrintOptoStatistics) { - Atomic::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter); + AtomicAccess::inc(&PhaseMacroExpand::_objs_scalar_replaced_counter); } #endif break; @@ -2456,7 +2456,7 @@ void PhaseMacroExpand::eliminate_macro_nodes() { #ifndef PRODUCT if (PrintOptoStatistics) { int membar_after = count_MemBar(C); - Atomic::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after); + AtomicAccess::add(&PhaseMacroExpand::_memory_barriers_removed_counter, membar_before - membar_after); } #endif } @@ -2681,10 +2681,10 @@ int PhaseMacroExpand::_GC_barriers_removed_counter = 0; int PhaseMacroExpand::_memory_barriers_removed_counter = 0; void PhaseMacroExpand::print_statistics() { - tty->print("Objects scalar replaced = %d, ", Atomic::load(&_objs_scalar_replaced_counter)); - tty->print("Monitor objects removed = %d, ", Atomic::load(&_monitor_objects_removed_counter)); - tty->print("GC barriers removed = %d, ", Atomic::load(&_GC_barriers_removed_counter)); - tty->print_cr("Memory 
barriers removed = %d", Atomic::load(&_memory_barriers_removed_counter)); + tty->print("Objects scalar replaced = %d, ", AtomicAccess::load(&_objs_scalar_replaced_counter)); + tty->print("Monitor objects removed = %d, ", AtomicAccess::load(&_monitor_objects_removed_counter)); + tty->print("GC barriers removed = %d, ", AtomicAccess::load(&_GC_barriers_removed_counter)); + tty->print_cr("Memory barriers removed = %d", AtomicAccess::load(&_memory_barriers_removed_counter)); } int PhaseMacroExpand::count_MemBar(Compile *C) { diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 3cee76534555d..0d148edda6e63 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -61,7 +61,7 @@ #include "opto/runtime.hpp" #include "opto/subnode.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -2206,7 +2206,7 @@ NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCount c->set_next(nullptr); head = _named_counters; c->set_next(head); - } while (Atomic::cmpxchg(&_named_counters, head, c) != head); + } while (AtomicAccess::cmpxchg(&_named_counters, head, c) != head); return c; } diff --git a/src/hotspot/share/opto/stringopts.cpp b/src/hotspot/share/opto/stringopts.cpp index 28936a04219f7..25aa82870c354 100644 --- a/src/hotspot/share/opto/stringopts.cpp +++ b/src/hotspot/share/opto/stringopts.cpp @@ -31,7 +31,7 @@ #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/stringopts.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/stubRoutines.hpp" #define __ kit. 
@@ -409,7 +409,7 @@ Node_List PhaseStringOpts::collect_toString_calls() { } } #ifndef PRODUCT - Atomic::add(&_stropts_total, encountered); + AtomicAccess::add(&_stropts_total, encountered); #endif return string_calls; } @@ -682,7 +682,7 @@ PhaseStringOpts::PhaseStringOpts(PhaseGVN* gvn): StringConcat* merged = sc->merge(other, arg); if (merged->validate_control_flow() && merged->validate_mem_flow()) { #ifndef PRODUCT - Atomic::inc(&_stropts_merged); + AtomicAccess::inc(&_stropts_merged); if (PrintOptimizeStringConcat) { tty->print_cr("stacking would succeed"); } @@ -2041,7 +2041,7 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) { string_sizes->disconnect_inputs(C); sc->cleanup(); #ifndef PRODUCT - Atomic::inc(&_stropts_replaced); + AtomicAccess::inc(&_stropts_replaced); #endif } diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index cd356863a8e9e..0e469dd7f8404 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -69,7 +69,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -2948,7 +2948,7 @@ static bool initializeDirectBufferSupport(JNIEnv* env, JavaThread* thread) { return false; } - if (Atomic::cmpxchg(&directBufferSupportInitializeStarted, 0, 1) == 0) { + if (AtomicAccess::cmpxchg(&directBufferSupportInitializeStarted, 0, 1) == 0) { if (!lookupDirectBufferClasses(env)) { directBufferSupportInitializeFailed = 1; return false; @@ -3418,7 +3418,7 @@ void copy_jni_function_table(const struct JNINativeInterface_ *new_jni_NativeInt intptr_t *a = (intptr_t *) jni_functions(); intptr_t *b = (intptr_t *) new_jni_NativeInterface; for (uint i=0; i < sizeof(struct JNINativeInterface_)/sizeof(void *); i++) { - Atomic::store(a++, *b++); + AtomicAccess::store(a++, *b++); } } @@ -3534,18 +3534,18 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { jint result = JNI_ERR; DT_RETURN_MARK(CreateJavaVM, jint, (const jint&)result); - // We're about to use Atomic::xchg for synchronization. Some Zero + // We're about to use AtomicAccess::xchg for synchronization. Some Zero // platforms use the GCC builtin __sync_lock_test_and_set for this, // but __sync_lock_test_and_set is not guaranteed to do what we want // on all architectures. So we check it works before relying on it. #if defined(ZERO) && defined(ASSERT) { jint a = 0xcafebabe; - jint b = Atomic::xchg(&a, (jint) 0xdeadbeef); + jint b = AtomicAccess::xchg(&a, (jint) 0xdeadbeef); void *c = &a; - void *d = Atomic::xchg(&c, &b); - assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works"); - assert(c == &b && d == &a, "Atomic::xchg() works"); + void *d = AtomicAccess::xchg(&c, &b); + assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "AtomicAccess::xchg() works"); + assert(c == &b && d == &a, "AtomicAccess::xchg() works"); } #endif // ZERO && ASSERT @@ -3556,10 +3556,10 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { // Threads. 
We do an atomic compare and exchange to ensure only // one thread can call this method at a time - // We use Atomic::xchg rather than Atomic::add/dec since on some platforms + // We use AtomicAccess::xchg rather than AtomicAccess::add/dec since on some platforms // the add/dec implementations are dependent on whether we are running - // on a multiprocessor Atomic::xchg does not have this problem. - if (Atomic::xchg(&vm_created, IN_PROGRESS) != NOT_CREATED) { + // on a multiprocessor AtomicAccess::xchg does not have this problem. + if (AtomicAccess::xchg(&vm_created, IN_PROGRESS) != NOT_CREATED) { return JNI_EEXIST; // already created, or create attempt in progress } @@ -3568,7 +3568,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { // cleared here. If a previous creation attempt succeeded and we then // destroyed that VM, we will be prevented from trying to recreate // the VM in the same process, as the value will still be 0. - if (Atomic::xchg(&safe_to_recreate_vm, 0) == 0) { + if (AtomicAccess::xchg(&safe_to_recreate_vm, 0) == 0) { return JNI_ERR; } @@ -3592,7 +3592,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { *vm = (JavaVM *)(&main_vm); *(JNIEnv**)penv = thread->jni_environment(); // mark creation complete for other JNI ops - Atomic::release_store(&vm_created, COMPLETE); + AtomicAccess::release_store(&vm_created, COMPLETE); #if INCLUDE_JVMCI if (EnableJVMCI) { @@ -3658,7 +3658,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { // reset vm_created last to avoid race condition. Use OrderAccess to // control both compiler and architectural-based reordering. assert(vm_created == IN_PROGRESS, "must be"); - Atomic::release_store(&vm_created, NOT_CREATED); + AtomicAccess::release_store(&vm_created, NOT_CREATED); } // Flush stdout and stderr before exit. 
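The JNI_CreateJavaVM path above uses AtomicAccess::xchg on vm_created as a one-shot gate: the first caller flips NOT_CREATED to IN_PROGRESS and proceeds, every later caller sees a different previous value and gets JNI_EEXIST, and the winner finally publishes COMPLETE with a release store so other JNI entry points can rely on the initialized state. A rough standalone sketch of that protocol, using std::atomic and hypothetical function names rather than the HotSpot code itself:

    #include <atomic>

    // Hypothetical names; the three states mirror the vm_created protocol above.
    enum VmState : int { NOT_CREATED = 0, IN_PROGRESS = 1, COMPLETE = 2 };

    std::atomic<int> vm_state{NOT_CREATED};

    bool try_begin_create() {
      // Exactly one caller observes NOT_CREATED; every later caller is refused,
      // whether creation is still in progress or has already finished.
      return vm_state.exchange(IN_PROGRESS, std::memory_order_acq_rel) == NOT_CREATED;
    }

    void publish_created() {
      // Release store pairs with the acquire load in is_created(), so everything
      // initialized before this call is visible to threads that observe COMPLETE.
      vm_state.store(COMPLETE, std::memory_order_release);
    }

    bool is_created() {
      return vm_state.load(std::memory_order_acquire) == COMPLETE;
    }
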
diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index 0651c173e7bcd..daacfd4ab7a04 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -71,7 +71,7 @@ #include "prims/jvmtiThreadState.inline.hpp" #include "prims/stackwalk.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/deoptimization.hpp" #include "runtime/globals_extension.hpp" diff --git a/src/hotspot/share/prims/jvmtiAgent.cpp b/src/hotspot/share/prims/jvmtiAgent.cpp index 192bba72fbca6..16a47042a6934 100644 --- a/src/hotspot/share/prims/jvmtiAgent.cpp +++ b/src/hotspot/share/prims/jvmtiAgent.cpp @@ -31,7 +31,7 @@ #include "prims/jvmtiEnvBase.hpp" #include "prims/jvmtiExport.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -83,7 +83,7 @@ JvmtiAgent::JvmtiAgent(const char* name, const char* options, bool is_absolute_p _xrun(false) {} JvmtiAgent* JvmtiAgent::next() const { - return Atomic::load_acquire(&_next); + return AtomicAccess::load_acquire(&_next); } const char* JvmtiAgent::name() const { diff --git a/src/hotspot/share/prims/jvmtiAgentList.cpp b/src/hotspot/share/prims/jvmtiAgentList.cpp index 3c01bee2cd266..8da5b75be4611 100644 --- a/src/hotspot/share/prims/jvmtiAgentList.cpp +++ b/src/hotspot/share/prims/jvmtiAgentList.cpp @@ -28,7 +28,7 @@ #include "prims/jvmtiAgentList.hpp" #include "prims/jvmtiEnvBase.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.inline.hpp" JvmtiAgent* JvmtiAgentList::_head = nullptr; @@ -102,10 +102,10 @@ void JvmtiAgentList::add(JvmtiAgent* agent) { // address of the pointer to add new agent (&_head when the list is empty or &agent->_next of the last agent in the list) JvmtiAgent** tail_ptr = &_head; while (true) { - JvmtiAgent* next = Atomic::load(tail_ptr); + JvmtiAgent* next = AtomicAccess::load(tail_ptr); if (next == nullptr) { // *tail_ptr == nullptr here - if (Atomic::cmpxchg(tail_ptr, (JvmtiAgent*)nullptr, agent) != nullptr) { + if (AtomicAccess::cmpxchg(tail_ptr, (JvmtiAgent*)nullptr, agent) != nullptr) { // another thread added an agent, reload next from tail_ptr continue; } @@ -135,7 +135,7 @@ static void assert_initialized(JvmtiAgentList::Iterator& it) { #endif JvmtiAgent* JvmtiAgentList::head() { - return Atomic::load_acquire(&_head); + return AtomicAccess::load_acquire(&_head); } // In case an agent did not enable the VMInit callback, or if it is an -Xrun agent, diff --git a/src/hotspot/share/prims/jvmtiEnvBase.hpp b/src/hotspot/share/prims/jvmtiEnvBase.hpp index 809385a0604dc..d7cc12f2fbd06 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.hpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp @@ -29,7 +29,7 @@ #include "prims/jvmtiEnvThreadState.hpp" #include "prims/jvmtiEventController.hpp" #include "prims/jvmtiThreadState.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/fieldDescriptor.hpp" #include "runtime/frame.hpp" #include "runtime/javaThread.hpp" @@ -328,11 +328,11 @@ class JvmtiEnvBase : public CHeapObj { } JvmtiTagMap* tag_map_acquire() { - return Atomic::load_acquire(&_tag_map); + return AtomicAccess::load_acquire(&_tag_map); } void release_set_tag_map(JvmtiTagMap* tag_map) { - Atomic::release_store(&_tag_map, 
tag_map); + AtomicAccess::release_store(&_tag_map, tag_map); } // return true if event is enabled globally or for any thread diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp index d20de5f44a3a0..c0a4ca949c993 100644 --- a/src/hotspot/share/prims/jvmtiImpl.cpp +++ b/src/hotspot/share/prims/jvmtiImpl.cpp @@ -40,7 +40,7 @@ #include "prims/jvmtiEventController.inline.hpp" #include "prims/jvmtiImpl.hpp" #include "prims/jvmtiRedefineClasses.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" @@ -300,7 +300,7 @@ JvmtiBreakpoints *JvmtiCurrentBreakpoints::_jvmti_breakpoints = nullptr; JvmtiBreakpoints& JvmtiCurrentBreakpoints::get_jvmti_breakpoints() { if (_jvmti_breakpoints == nullptr) { JvmtiBreakpoints* breakpoints = new JvmtiBreakpoints(); - if (!Atomic::replace_if_null(&_jvmti_breakpoints, breakpoints)) { + if (!AtomicAccess::replace_if_null(&_jvmti_breakpoints, breakpoints)) { // already created concurently delete breakpoints; } diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.cpp b/src/hotspot/share/prims/jvmtiRawMonitor.cpp index a1655a7a05eac..85a7714fb29dc 100644 --- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp +++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp @@ -24,7 +24,7 @@ #include "memory/allocation.inline.hpp" #include "prims/jvmtiRawMonitor.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.hpp" #include "runtime/orderAccess.hpp" @@ -122,7 +122,7 @@ JvmtiRawMonitor::is_valid() { void JvmtiRawMonitor::simple_enter(Thread* self) { for (;;) { - if (Atomic::replace_if_null(&_owner, self)) { + if (AtomicAccess::replace_if_null(&_owner, self)) { if (self->is_Java_thread()) { Continuation::pin(JavaThread::cast(self)); } @@ -137,7 +137,7 @@ void JvmtiRawMonitor::simple_enter(Thread* self) { node._next = _entry_list; _entry_list = &node; OrderAccess::fence(); - if (_owner == nullptr && Atomic::replace_if_null(&_owner, self)) { + if (_owner == nullptr && AtomicAccess::replace_if_null(&_owner, self)) { _entry_list = node._next; RawMonitor_lock->unlock(); if (self->is_Java_thread()) { @@ -154,7 +154,7 @@ void JvmtiRawMonitor::simple_enter(Thread* self) { void JvmtiRawMonitor::simple_exit(Thread* self) { guarantee(_owner == self, "invariant"); - Atomic::release_store(&_owner, (Thread*)nullptr); + AtomicAccess::release_store(&_owner, (Thread*)nullptr); OrderAccess::fence(); if (self->is_Java_thread()) { Continuation::unpin(JavaThread::cast(self)); @@ -324,7 +324,7 @@ void JvmtiRawMonitor::ExitOnSuspend::operator()(JavaThread* current) { // JavaThreads will enter here with state _thread_in_native. 
void JvmtiRawMonitor::raw_enter(Thread* self) { - // TODO Atomic::load on _owner field + // TODO AtomicAccess::load on _owner field if (_owner == self) { _recursions++; return; diff --git a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp index d4ee34b881ff2..4d841592501ce 100644 --- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp +++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp @@ -57,7 +57,7 @@ #include "prims/jvmtiThreadState.inline.hpp" #include "prims/methodComparator.hpp" #include "prims/resolvedMethodTable.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" #include "runtime/jniHandles.inline.hpp" @@ -4537,7 +4537,7 @@ u8 VM_RedefineClasses::next_id() { while (true) { u8 id = _id_counter; u8 next_id = id + 1; - u8 result = Atomic::cmpxchg(&_id_counter, id, next_id); + u8 result = AtomicAccess::cmpxchg(&_id_counter, id, next_id); if (result == id) { return next_id; } diff --git a/src/hotspot/share/prims/jvmtiThreadState.cpp b/src/hotspot/share/prims/jvmtiThreadState.cpp index e76782e3d3385..419bd9f055069 100644 --- a/src/hotspot/share/prims/jvmtiThreadState.cpp +++ b/src/hotspot/share/prims/jvmtiThreadState.cpp @@ -307,7 +307,7 @@ JvmtiVTMSTransitionDisabler::JvmtiVTMSTransitionDisabler(bool is_SR) if (!sync_protocol_enabled_permanently()) { JvmtiVTMSTransitionDisabler::inc_sync_protocol_enabled_count(); if (is_SR) { - Atomic::store(&_sync_protocol_enabled_permanently, true); + AtomicAccess::store(&_sync_protocol_enabled_permanently, true); } } VTMS_transition_disable_for_all(); @@ -347,7 +347,7 @@ JvmtiVTMSTransitionDisabler::VTMS_transition_disable_for_one() { while (_SR_mode) { // suspender or resumer is a JvmtiVTMSTransitionDisabler monopolist ml.wait(10); // wait while there is an active suspender or resumer } - Atomic::inc(&_VTMS_transition_disable_for_one_count); + AtomicAccess::inc(&_VTMS_transition_disable_for_one_count); java_lang_Thread::inc_VTMS_transition_disable_count(vth()); while (java_lang_Thread::is_in_VTMS_transition(vth())) { @@ -377,7 +377,7 @@ JvmtiVTMSTransitionDisabler::VTMS_transition_disable_for_all() { ml.wait(10); // Wait while there is any active jvmtiVTMSTransitionDisabler. } } - Atomic::inc(&_VTMS_transition_disable_for_all_count); + AtomicAccess::inc(&_VTMS_transition_disable_for_all_count); // Block while some mount/unmount transitions are in progress. // Debug version fails and prints diagnostic information. @@ -415,7 +415,7 @@ JvmtiVTMSTransitionDisabler::VTMS_transition_enable_for_one() { } MonitorLocker ml(JvmtiVTMSTransition_lock); java_lang_Thread::dec_VTMS_transition_disable_count(vth()); - Atomic::dec(&_VTMS_transition_disable_for_one_count); + AtomicAccess::dec(&_VTMS_transition_disable_for_one_count); if (_VTMS_transition_disable_for_one_count == 0) { ml.notify_all(); } @@ -435,7 +435,7 @@ JvmtiVTMSTransitionDisabler::VTMS_transition_enable_for_all() { if (_is_SR) { // Disabler is suspender or resumer. 
_SR_mode = false; } - Atomic::dec(&_VTMS_transition_disable_for_all_count); + AtomicAccess::dec(&_VTMS_transition_disable_for_all_count); if (_VTMS_transition_disable_for_all_count == 0 || _is_SR) { ml.notify_all(); } diff --git a/src/hotspot/share/prims/jvmtiThreadState.hpp b/src/hotspot/share/prims/jvmtiThreadState.hpp index cec251613f36d..fab762c8f3871 100644 --- a/src/hotspot/share/prims/jvmtiThreadState.hpp +++ b/src/hotspot/share/prims/jvmtiThreadState.hpp @@ -101,10 +101,10 @@ class JvmtiVTMSTransitionDisabler : public AnyObj { static bool VTMS_notify_jvmti_events() { return _VTMS_notify_jvmti_events; } static void set_VTMS_notify_jvmti_events(bool val) { _VTMS_notify_jvmti_events = val; } - static void inc_sync_protocol_enabled_count() { Atomic::inc(&_sync_protocol_enabled_count); } - static void dec_sync_protocol_enabled_count() { Atomic::dec(&_sync_protocol_enabled_count); } - static int sync_protocol_enabled_count() { return Atomic::load(&_sync_protocol_enabled_count); } - static bool sync_protocol_enabled_permanently() { return Atomic::load(&_sync_protocol_enabled_permanently); } + static void inc_sync_protocol_enabled_count() { AtomicAccess::inc(&_sync_protocol_enabled_count); } + static void dec_sync_protocol_enabled_count() { AtomicAccess::dec(&_sync_protocol_enabled_count); } + static int sync_protocol_enabled_count() { return AtomicAccess::load(&_sync_protocol_enabled_count); } + static bool sync_protocol_enabled_permanently() { return AtomicAccess::load(&_sync_protocol_enabled_permanently); } static bool sync_protocol_enabled() { return sync_protocol_enabled_permanently() || sync_protocol_enabled_count() > 0; } diff --git a/src/hotspot/share/prims/resolvedMethodTable.cpp b/src/hotspot/share/prims/resolvedMethodTable.cpp index cdd41730dea5a..5475cfe986369 100644 --- a/src/hotspot/share/prims/resolvedMethodTable.cpp +++ b/src/hotspot/share/prims/resolvedMethodTable.cpp @@ -35,7 +35,7 @@ #include "oops/oop.inline.hpp" #include "oops/weakHandle.inline.hpp" #include "prims/resolvedMethodTable.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -215,11 +215,11 @@ oop ResolvedMethodTable::add_method(const Method* method, Handle rmethod_name) { } void ResolvedMethodTable::item_added() { - Atomic::inc(&_items_count); + AtomicAccess::inc(&_items_count); } void ResolvedMethodTable::item_removed() { - Atomic::dec(&_items_count); + AtomicAccess::dec(&_items_count); log_trace(membername, table) ("ResolvedMethod entry removed"); } @@ -258,12 +258,12 @@ void ResolvedMethodTable::gc_notification(size_t num_dead) { void ResolvedMethodTable::trigger_concurrent_work() { MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); - Atomic::store(&_has_work, true); + AtomicAccess::store(&_has_work, true); Service_lock->notify_all(); } bool ResolvedMethodTable::has_work() { - return Atomic::load_acquire(&_has_work); + return AtomicAccess::load_acquire(&_has_work); } void ResolvedMethodTable::do_concurrent_work(JavaThread* jt) { @@ -275,7 +275,7 @@ void ResolvedMethodTable::do_concurrent_work(JavaThread* jt) { } else { clean_dead_entries(jt); } - Atomic::release_store(&_has_work, false); + AtomicAccess::release_store(&_has_work, false); } void ResolvedMethodTable::grow(JavaThread* jt) { diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp index b4718a9a18ae8..4b2ffd5786054 100644 --- a/src/hotspot/share/prims/unsafe.cpp 
+++ b/src/hotspot/share/prims/unsafe.cpp @@ -737,13 +737,13 @@ UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeReference(JNIEnv *env, jobject un UNSAFE_ENTRY_SCOPED(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) { oop p = JNIHandles::resolve(obj); volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset); - return Atomic::cmpxchg(addr, e, x); + return AtomicAccess::cmpxchg(addr, e, x); } UNSAFE_END UNSAFE_ENTRY_SCOPED(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) { oop p = JNIHandles::resolve(obj); volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset); - return Atomic::cmpxchg(addr, e, x); + return AtomicAccess::cmpxchg(addr, e, x); } UNSAFE_END UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetReference(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject e_h, jobject x_h)) { @@ -758,13 +758,13 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetReference(JNIEnv *env, jobject unsafe UNSAFE_ENTRY_SCOPED(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) { oop p = JNIHandles::resolve(obj); volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset); - return Atomic::cmpxchg(addr, e, x) == e; + return AtomicAccess::cmpxchg(addr, e, x) == e; } UNSAFE_END UNSAFE_ENTRY_SCOPED(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) { oop p = JNIHandles::resolve(obj); volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset); - return Atomic::cmpxchg(addr, e, x) == e; + return AtomicAccess::cmpxchg(addr, e, x) == e; } UNSAFE_END static void post_thread_park_event(EventThreadPark* event, const oop obj, jlong timeout_nanos, jlong until_epoch_millis) { diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 4b3583edf511a..afaa089e0b2c7 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -74,7 +74,7 @@ #include "prims/wbtestmethods/parserTests.hpp" #include "prims/whitebox.inline.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/flags/jvmFlag.hpp" @@ -2304,7 +2304,7 @@ WB_ENTRY(jint, WB_HandshakeWalkStack(JNIEnv* env, jobject wb, jobject thread_han jt->print_on(tty); jt->print_stack_on(tty); tty->cr(); - Atomic::inc(&_num_threads_completed); + AtomicAccess::inc(&_num_threads_completed); } public: @@ -2371,14 +2371,14 @@ WB_ENTRY(void, WB_LockAndBlock(JNIEnv* env, jobject wb, jboolean suspender)) // We will deadlock here if we are 'suspender' and 'suspendee' // suspended in ~ThreadBlockInVM. This verifies we only suspend // at the right place. - while (Atomic::cmpxchg(&_emulated_lock, 0, 1) != 0) {} + while (AtomicAccess::cmpxchg(&_emulated_lock, 0, 1) != 0) {} assert(_emulated_lock == 1, "Must be locked"); // Sleep much longer in suspendee to force situation where // 'suspender' is waiting above to acquire lock. os::naked_short_sleep(suspender ? 
1 : 10); } - Atomic::store(&_emulated_lock, 0); + AtomicAccess::store(&_emulated_lock, 0); WB_END // Some convenience methods to deal with objects from java diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomicAccess.hpp similarity index 89% rename from src/hotspot/share/runtime/atomic.hpp rename to src/hotspot/share/runtime/atomicAccess.hpp index bf198b5f5621f..0ee60d2b7b7db 100644 --- a/src/hotspot/share/runtime/atomic.hpp +++ b/src/hotspot/share/runtime/atomicAccess.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_RUNTIME_ATOMIC_HPP -#define SHARE_RUNTIME_ATOMIC_HPP +#ifndef SHARE_RUNTIME_ATOMICACCESS_HPP +#define SHARE_RUNTIME_ATOMICACCESS_HPP #include "memory/allocation.hpp" #include "metaprogramming/enableIf.hpp" @@ -54,7 +54,7 @@ enum ScopedFenceType { , RELEASE_X_FENCE }; -class Atomic : AllStatic { +class AtomicAccess : AllStatic { public: // Atomic operations on int64 types are required to be available on // all platforms. At a minimum a 64-bit cmpxchg must be available @@ -491,7 +491,7 @@ class Atomic : AllStatic { }; template -struct Atomic::IsPointerConvertible : AllStatic { +struct AtomicAccess::IsPointerConvertible : AllStatic { // Determine whether From* is implicitly convertible to To*, using // the "sizeof trick". typedef char yes; @@ -506,7 +506,7 @@ struct Atomic::IsPointerConvertible : AllStatic { // Handle load for pointer and integral types. template -struct Atomic::LoadImpl< +struct AtomicAccess::LoadImpl< T, PlatformOp, typename EnableIf::value || std::is_pointer::value>::type> @@ -525,7 +525,7 @@ struct Atomic::LoadImpl< // arguments, and returns the recovered result of that translated // call. template -struct Atomic::LoadImpl< +struct AtomicAccess::LoadImpl< T, PlatformOp, typename EnableIf::value>::type> @@ -544,9 +544,9 @@ struct Atomic::LoadImpl< // For increased safety, the default implementation only allows // load types that are pointer sized or smaller. If a platform still // supports wide atomics, then it has to use specialization -// of Atomic::PlatformLoad for that wider size class. +// of AtomicAccess::PlatformLoad for that wider size class. template -struct Atomic::PlatformLoad { +struct AtomicAccess::PlatformLoad { template T operator()(T const volatile* dest) const { STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization @@ -558,7 +558,7 @@ struct Atomic::PlatformLoad { // // All the involved types must be identical. template -struct Atomic::StoreImpl< +struct AtomicAccess::StoreImpl< T, T, PlatformOp, typename EnableIf::value>::type> @@ -575,10 +575,10 @@ struct Atomic::StoreImpl< // destination's type; it must be type-correct to store the // new_value in the destination. template -struct Atomic::StoreImpl< +struct AtomicAccess::StoreImpl< D*, T*, PlatformOp, - typename EnableIf::value>::type> + typename EnableIf::value>::type> { void operator()(D* volatile* dest, T* new_value) const { // Allow derived to base conversion, and adding cv-qualifiers. @@ -594,7 +594,7 @@ struct Atomic::StoreImpl< // This translates the original call into a call on the decayed // arguments. 
template -struct Atomic::StoreImpl< +struct AtomicAccess::StoreImpl< T, T, PlatformOp, typename EnableIf::value>::type> @@ -613,9 +613,9 @@ struct Atomic::StoreImpl< // For increased safety, the default implementation only allows // storing types that are pointer sized or smaller. If a platform still // supports wide atomics, then it has to use specialization -// of Atomic::PlatformStore for that wider size class. +// of AtomicAccess::PlatformStore for that wider size class. template -struct Atomic::PlatformStore { +struct AtomicAccess::PlatformStore { template void operator()(T volatile* dest, T new_value) const { @@ -625,23 +625,23 @@ struct Atomic::PlatformStore { }; template -inline void Atomic::inc(D volatile* dest, atomic_memory_order order) { +inline void AtomicAccess::inc(D volatile* dest, atomic_memory_order order) { STATIC_ASSERT(std::is_pointer::value || std::is_integral::value); using I = std::conditional_t::value, ptrdiff_t, D>; - Atomic::add(dest, I(1), order); + AtomicAccess::add(dest, I(1), order); } template -inline void Atomic::dec(D volatile* dest, atomic_memory_order order) { +inline void AtomicAccess::dec(D volatile* dest, atomic_memory_order order) { STATIC_ASSERT(std::is_pointer::value || std::is_integral::value); using I = std::conditional_t::value, ptrdiff_t, D>; // Assumes two's complement integer representation. #pragma warning(suppress: 4146) - Atomic::add(dest, I(-1), order); + AtomicAccess::add(dest, I(-1), order); } template -inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) { +inline D AtomicAccess::sub(D volatile* dest, I sub_value, atomic_memory_order order) { STATIC_ASSERT(std::is_pointer::value || std::is_integral::value); STATIC_ASSERT(std::is_integral::value); // If D is a pointer type, use [u]intptr_t as the addend type, @@ -654,7 +654,7 @@ inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) { AddendType addend = sub_value; // Assumes two's complement integer representation. #pragma warning(suppress: 4146) // In case AddendType is not signed. - return Atomic::add(dest, -addend, order); + return AtomicAccess::add(dest, -addend, order); } // Define the class before including platform file, which may specialize @@ -663,7 +663,7 @@ inline D Atomic::sub(D volatile* dest, I sub_value, atomic_memory_order order) { // specializations of the class. The platform file is responsible for // providing those. template -struct Atomic::PlatformCmpxchg { +struct AtomicAccess::PlatformCmpxchg { template T operator()(T volatile* dest, T compare_value, @@ -674,7 +674,7 @@ struct Atomic::PlatformCmpxchg { // Define the class before including platform file, which may use this // as a base class, requiring it be complete. The definition is later // in this file, near the other definitions related to cmpxchg. -struct Atomic::CmpxchgByteUsingInt { +struct AtomicAccess::CmpxchgByteUsingInt { static uint8_t get_byte_in_int(uint32_t n, uint32_t idx); static uint32_t set_byte_in_int(uint32_t n, uint8_t b, uint32_t idx); template @@ -688,7 +688,7 @@ struct Atomic::CmpxchgByteUsingInt { // as a base class, requiring it be complete. The definition is later // in this file, near the other definitions related to xchg. template -struct Atomic::XchgUsingCmpxchg { +struct AtomicAccess::XchgUsingCmpxchg { template T operator()(T volatile* dest, T exchange_value, @@ -698,7 +698,7 @@ struct Atomic::XchgUsingCmpxchg { // Define the class before including platform file, which may use this // as a base class, requiring it be complete. 
template -class Atomic::AddUsingCmpxchg { +class AtomicAccess::AddUsingCmpxchg { public: template static inline D add_then_fetch(D volatile* dest, @@ -718,9 +718,9 @@ class Atomic::AddUsingCmpxchg { D old_value; D new_value; do { - old_value = Atomic::load(dest); + old_value = AtomicAccess::load(dest); new_value = old_value + add_value; - } while (old_value != Atomic::cmpxchg(dest, old_value, new_value, order)); + } while (old_value != AtomicAccess::cmpxchg(dest, old_value, new_value, order)); return old_value; } }; @@ -731,7 +731,7 @@ class Atomic::AddUsingCmpxchg { // specializations of the class. The platform file is responsible for // providing those. template -struct Atomic::PlatformXchg { +struct AtomicAccess::PlatformXchg { template T operator()(T volatile* dest, T exchange_value, @@ -739,16 +739,16 @@ struct Atomic::PlatformXchg { }; // Implement fetch_then_bitop operations using a CAS loop. -class Atomic::PrefetchBitopsUsingCmpxchg { +class AtomicAccess::PrefetchBitopsUsingCmpxchg { template T bitop(T volatile* dest, atomic_memory_order order, Op operation) const { T old_value; T new_value; - T fetched_value = Atomic::load(dest); + T fetched_value = AtomicAccess::load(dest); do { old_value = fetched_value; new_value = operation(old_value); - fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order); + fetched_value = AtomicAccess::cmpxchg(dest, old_value, new_value, order); } while (old_value != fetched_value); return fetched_value; } @@ -771,16 +771,16 @@ class Atomic::PrefetchBitopsUsingCmpxchg { }; // Implement bitop_then_fetch operations using a CAS loop. -class Atomic::PostfetchBitopsUsingCmpxchg { +class AtomicAccess::PostfetchBitopsUsingCmpxchg { template T bitop(T volatile* dest, atomic_memory_order order, Op operation) const { T old_value; T new_value; - T fetched_value = Atomic::load(dest); + T fetched_value = AtomicAccess::load(dest); do { old_value = fetched_value; new_value = operation(old_value); - fetched_value = Atomic::cmpxchg(dest, old_value, new_value, order); + fetched_value = AtomicAccess::cmpxchg(dest, old_value, new_value, order); } while (old_value != fetched_value); return new_value; } @@ -804,21 +804,21 @@ class Atomic::PostfetchBitopsUsingCmpxchg { // Implement bitop_then_fetch operations by calling fetch_then_bitop and // applying the operation to the result and the bits argument. -class Atomic::PostfetchBitopsUsingPrefetch { +class AtomicAccess::PostfetchBitopsUsingPrefetch { public: template T and_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { - return bits & Atomic::fetch_then_and(dest, bits, order); + return bits & AtomicAccess::fetch_then_and(dest, bits, order); } template T or_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { - return bits | Atomic::fetch_then_or(dest, bits, order); + return bits | AtomicAccess::fetch_then_or(dest, bits, order); } template T xor_then_fetch(T volatile* dest, T bits, atomic_memory_order order) const { - return bits ^ Atomic::fetch_then_xor(dest, bits, order); + return bits ^ AtomicAccess::fetch_then_xor(dest, bits, order); } }; @@ -826,7 +826,7 @@ class Atomic::PostfetchBitopsUsingPrefetch { // partial specialization providing size, either as a template parameter or as // a specific value. 
template -class Atomic::PlatformBitops +class AtomicAccess::PlatformBitops : public PrefetchBitopsUsingCmpxchg, public PostfetchBitopsUsingCmpxchg {}; @@ -869,62 +869,62 @@ class ScopedFence : public ScopedFenceGeneral { #endif template -inline T Atomic::load(const volatile T* dest) { +inline T AtomicAccess::load(const volatile T* dest) { return LoadImpl >()(dest); } template -struct Atomic::PlatformOrderedLoad { +struct AtomicAccess::PlatformOrderedLoad { template T operator()(const volatile T* p) const { ScopedFence f((void*)p); - return Atomic::load(p); + return AtomicAccess::load(p); } }; template -inline T Atomic::load_acquire(const volatile T* p) { +inline T AtomicAccess::load_acquire(const volatile T* p) { return LoadImpl >()(p); } template -inline void Atomic::store(volatile D* dest, T store_value) { +inline void AtomicAccess::store(volatile D* dest, T store_value) { StoreImpl >()(dest, store_value); } template -struct Atomic::PlatformOrderedStore { +struct AtomicAccess::PlatformOrderedStore { template void operator()(volatile T* p, T v) const { ScopedFence f((void*)p); - Atomic::store(p, v); + AtomicAccess::store(p, v); } }; template -inline void Atomic::release_store(volatile D* p, T v) { +inline void AtomicAccess::release_store(volatile D* p, T v) { StoreImpl >()(p, v); } template -inline void Atomic::release_store_fence(volatile D* p, T v) { +inline void AtomicAccess::release_store_fence(volatile D* p, T v) { StoreImpl >()(p, v); } template -inline D Atomic::add(D volatile* dest, I add_value, - atomic_memory_order order) { +inline D AtomicAccess::add(D volatile* dest, I add_value, + atomic_memory_order order) { return AddImpl::add_then_fetch(dest, add_value, order); } template -inline D Atomic::fetch_then_add(D volatile* dest, I add_value, - atomic_memory_order order) { +inline D AtomicAccess::fetch_then_add(D volatile* dest, I add_value, + atomic_memory_order order) { return AddImpl::fetch_then_add(dest, add_value, order); } template -struct Atomic::AddImpl< +struct AtomicAccess::AddImpl< D, I, typename EnableIf::value && std::is_integral::value && @@ -942,7 +942,7 @@ struct Atomic::AddImpl< }; template -struct Atomic::AddImpl< +struct AtomicAccess::AddImpl< P*, I, typename EnableIf::value && (sizeof(I) <= sizeof(P*))>::type> { @@ -989,23 +989,23 @@ struct Atomic::AddImpl< }; template -inline D Atomic::add_using_helper(Fn fn, D volatile* dest, I add_value) { +inline D AtomicAccess::add_using_helper(Fn fn, D volatile* dest, I add_value) { return PrimitiveConversions::cast( fn(PrimitiveConversions::cast(add_value), reinterpret_cast(dest))); } template -inline D Atomic::cmpxchg(D volatile* dest, - U compare_value, - T exchange_value, - atomic_memory_order order) { +inline D AtomicAccess::cmpxchg(D volatile* dest, + U compare_value, + T exchange_value, + atomic_memory_order order) { return CmpxchgImpl()(dest, compare_value, exchange_value, order); } template -inline bool Atomic::replace_if_null(D* volatile* dest, T* value, - atomic_memory_order order) { +inline bool AtomicAccess::replace_if_null(D* volatile* dest, T* value, + atomic_memory_order order) { // Presently using a trivial implementation in terms of cmpxchg. // Consider adding platform support, to permit the use of compiler // intrinsics like gcc's __sync_bool_compare_and_swap. @@ -1017,7 +1017,7 @@ inline bool Atomic::replace_if_null(D* volatile* dest, T* value, // // All the involved types must be identical. 
template -struct Atomic::CmpxchgImpl< +struct AtomicAccess::CmpxchgImpl< T, T, T, typename EnableIf::value>::type> { @@ -1041,9 +1041,9 @@ struct Atomic::CmpxchgImpl< // destination's type; it must be type-correct to store the // exchange_value in the destination. template -struct Atomic::CmpxchgImpl< +struct AtomicAccess::CmpxchgImpl< D*, U*, T*, - typename EnableIf::value && + typename EnableIf::value && std::is_same, std::remove_cv_t>::value>::type> { @@ -1066,7 +1066,7 @@ struct Atomic::CmpxchgImpl< // arguments, and returns the recovered result of that translated // call. template -struct Atomic::CmpxchgImpl< +struct AtomicAccess::CmpxchgImpl< T, T, T, typename EnableIf::value>::type> { @@ -1084,10 +1084,10 @@ struct Atomic::CmpxchgImpl< }; template -inline T Atomic::cmpxchg_using_helper(Fn fn, - T volatile* dest, - T compare_value, - T exchange_value) { +inline T AtomicAccess::cmpxchg_using_helper(Fn fn, + T volatile* dest, + T compare_value, + T exchange_value) { STATIC_ASSERT(sizeof(Type) == sizeof(T)); return PrimitiveConversions::cast( fn(PrimitiveConversions::cast(exchange_value), @@ -1095,25 +1095,25 @@ inline T Atomic::cmpxchg_using_helper(Fn fn, PrimitiveConversions::cast(compare_value))); } -inline uint32_t Atomic::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n, - uint8_t b, - uint32_t idx) { +inline uint32_t AtomicAccess::CmpxchgByteUsingInt::set_byte_in_int(uint32_t n, + uint8_t b, + uint32_t idx) { uint32_t bitsIdx = BitsPerByte * idx; return (n & ~(static_cast(0xff) << bitsIdx)) | (static_cast(b) << bitsIdx); } -inline uint8_t Atomic::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n, - uint32_t idx) { +inline uint8_t AtomicAccess::CmpxchgByteUsingInt::get_byte_in_int(uint32_t n, + uint32_t idx) { uint32_t bitsIdx = BitsPerByte * idx; return (uint8_t)(n >> bitsIdx); } template -inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::CmpxchgByteUsingInt::operator()(T volatile* dest, + T compare_value, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(sizeof(T) == sizeof(uint8_t)); uint8_t canon_exchange_value = exchange_value; uint8_t canon_compare_value = compare_value; @@ -1127,7 +1127,7 @@ inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest, // current value may not be what we are looking for, so force it // to that value so the initial cmpxchg will fail if it is different - uint32_t cur = set_byte_in_int(Atomic::load(aligned_dest), canon_compare_value, idx); + uint32_t cur = set_byte_in_int(AtomicAccess::load(aligned_dest), canon_compare_value, idx); // always execute a real cmpxchg so that we get the required memory // barriers even on initial failure @@ -1152,7 +1152,7 @@ inline T Atomic::CmpxchgByteUsingInt::operator()(T volatile* dest, // // All the involved types must be identical. template -struct Atomic::XchgImpl< +struct AtomicAccess::XchgImpl< T, T, typename EnableIf::value>::type> { @@ -1168,9 +1168,9 @@ struct Atomic::XchgImpl< // destination's type; it must be type-correct to store the // exchange_value in the destination. template -struct Atomic::XchgImpl< +struct AtomicAccess::XchgImpl< D*, T*, - typename EnableIf::value>::type> + typename EnableIf::value>::type> { D* operator()(D* volatile* dest, T* exchange_value, atomic_memory_order order) const { // Allow derived to base conversion, and adding cv-qualifiers. 
@@ -1187,7 +1187,7 @@ struct Atomic::XchgImpl< // arguments, and returns the recovered result of that translated // call. template -struct Atomic::XchgImpl< +struct AtomicAccess::XchgImpl< T, T, typename EnableIf::value>::type> { @@ -1203,9 +1203,9 @@ struct Atomic::XchgImpl< }; template -inline T Atomic::xchg_using_helper(Fn fn, - T volatile* dest, - T exchange_value) { +inline T AtomicAccess::xchg_using_helper(Fn fn, + T volatile* dest, + T exchange_value) { STATIC_ASSERT(sizeof(Type) == sizeof(T)); // Notice the swapped order of arguments. Change when/if stubs are rewritten. return PrimitiveConversions::cast( @@ -1214,22 +1214,22 @@ inline T Atomic::xchg_using_helper(Fn fn, } template -inline D Atomic::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) { +inline D AtomicAccess::xchg(volatile D* dest, T exchange_value, atomic_memory_order order) { return XchgImpl()(dest, exchange_value, order); } template template -inline T Atomic::XchgUsingCmpxchg::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { +inline T AtomicAccess::XchgUsingCmpxchg::operator()(T volatile* dest, + T exchange_value, + atomic_memory_order order) const { STATIC_ASSERT(byte_size == sizeof(T)); T old_value; do { - old_value = Atomic::load(dest); - } while (old_value != Atomic::cmpxchg(dest, old_value, exchange_value, order)); + old_value = AtomicAccess::load(dest); + } while (old_value != AtomicAccess::cmpxchg(dest, old_value, exchange_value, order)); return old_value; } -#endif // SHARE_RUNTIME_ATOMIC_HPP +#endif // SHARE_RUNTIME_ATOMICACCESS_HPP diff --git a/src/hotspot/share/runtime/basicLock.hpp b/src/hotspot/share/runtime/basicLock.hpp index 8ed38747c7448..c22416a1c06af 100644 --- a/src/hotspot/share/runtime/basicLock.hpp +++ b/src/hotspot/share/runtime/basicLock.hpp @@ -26,7 +26,7 @@ #define SHARE_RUNTIME_BASICLOCK_HPP #include "oops/markWord.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/sizes.hpp" @@ -39,8 +39,8 @@ class BasicLock { // be nullptr or the ObjectMonitor* used when locking. volatile uintptr_t _metadata; - uintptr_t get_metadata() const { return Atomic::load(&_metadata); } - void set_metadata(uintptr_t value) { Atomic::store(&_metadata, value); } + uintptr_t get_metadata() const { return AtomicAccess::load(&_metadata); } + void set_metadata(uintptr_t value) { AtomicAccess::store(&_metadata, value); } static int metadata_offset_in_bytes() { return (int)offset_of(BasicLock, _metadata); } public: diff --git a/src/hotspot/share/runtime/continuationJavaClasses.inline.hpp b/src/hotspot/share/runtime/continuationJavaClasses.inline.hpp index 0e8bf2d656381..f464648cdc3fa 100644 --- a/src/hotspot/share/runtime/continuationJavaClasses.inline.hpp +++ b/src/hotspot/share/runtime/continuationJavaClasses.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/stackChunkOop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" inline oop jdk_internal_vm_Continuation::scope(oop continuation) { return continuation->obj_field(_scope_offset); @@ -154,23 +154,23 @@ inline void jdk_internal_vm_StackChunk::set_bottom(oop chunk, int value) { } inline uint8_t jdk_internal_vm_StackChunk::flags(oop chunk) { - return Atomic::load(chunk->field_addr(_flags_offset)); + return AtomicAccess::load(chunk->field_addr(_flags_offset)); } inline void jdk_internal_vm_StackChunk::set_flags(oop chunk, uint8_t value) { - Atomic::store(chunk->field_addr(_flags_offset), value); + AtomicAccess::store(chunk->field_addr(_flags_offset), value); } inline uint8_t jdk_internal_vm_StackChunk::flags_acquire(oop chunk) { - return Atomic::load_acquire(chunk->field_addr(_flags_offset)); + return AtomicAccess::load_acquire(chunk->field_addr(_flags_offset)); } inline void jdk_internal_vm_StackChunk::release_set_flags(oop chunk, uint8_t value) { - Atomic::release_store(chunk->field_addr(_flags_offset), value); + AtomicAccess::release_store(chunk->field_addr(_flags_offset), value); } inline bool jdk_internal_vm_StackChunk::try_set_flags(oop chunk, uint8_t expected_value, uint8_t new_value) { - return Atomic::cmpxchg(chunk->field_addr(_flags_offset), expected_value, new_value) == expected_value; + return AtomicAccess::cmpxchg(chunk->field_addr(_flags_offset), expected_value, new_value) == expected_value; } inline int jdk_internal_vm_StackChunk::maxThawingSize(oop chunk) { @@ -187,11 +187,11 @@ inline void jdk_internal_vm_StackChunk::set_maxThawingSize(oop chunk, int value) // lockStackSize is read concurrently by GC threads so we use Atomic. inline uint8_t jdk_internal_vm_StackChunk::lockStackSize(oop chunk) { - return Atomic::load(chunk->field_addr(_lockStackSize_offset)); + return AtomicAccess::load(chunk->field_addr(_lockStackSize_offset)); } inline void jdk_internal_vm_StackChunk::set_lockStackSize(oop chunk, uint8_t value) { - Atomic::store(chunk->field_addr(_lockStackSize_offset), value); + AtomicAccess::store(chunk->field_addr(_lockStackSize_offset), value); } #endif // SHARE_RUNTIME_CONTINUATIONJAVACLASSES_INLINE_HPP diff --git a/src/hotspot/share/runtime/cpuTimeCounters.cpp b/src/hotspot/share/runtime/cpuTimeCounters.cpp index c6bcffbd024c3..c7e484416627f 100644 --- a/src/hotspot/share/runtime/cpuTimeCounters.cpp +++ b/src/hotspot/share/runtime/cpuTimeCounters.cpp @@ -23,7 +23,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/cpuTimeCounters.hpp" const char* CPUTimeGroups::to_string(CPUTimeType val) { @@ -70,14 +70,14 @@ CPUTimeCounters::CPUTimeCounters() : void CPUTimeCounters::inc_gc_total_cpu_time(jlong diff) { CPUTimeCounters* instance = CPUTimeCounters::get_instance(); - Atomic::add(&(instance->_gc_total_cpu_time_diff), diff); + AtomicAccess::add(&(instance->_gc_total_cpu_time_diff), diff); } void CPUTimeCounters::publish_gc_total_cpu_time() { CPUTimeCounters* instance = CPUTimeCounters::get_instance(); // Atomically fetch the current _gc_total_cpu_time_diff and reset it to zero. 
jlong new_value = 0; - jlong fetched_value = Atomic::xchg(&(instance->_gc_total_cpu_time_diff), new_value); + jlong fetched_value = AtomicAccess::xchg(&(instance->_gc_total_cpu_time_diff), new_value); get_counter(CPUTimeGroups::CPUTimeType::gc_total)->inc(fetched_value); } diff --git a/src/hotspot/share/runtime/cpuTimeCounters.hpp b/src/hotspot/share/runtime/cpuTimeCounters.hpp index 3137caf43592e..efa44f9173d16 100644 --- a/src/hotspot/share/runtime/cpuTimeCounters.hpp +++ b/src/hotspot/share/runtime/cpuTimeCounters.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2023 Google LLC. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -64,7 +64,7 @@ class CPUTimeCounters: public CHeapObj { // A long which atomically tracks how much CPU time has been spent doing GC // since the last time we called `publish_total_cpu_time()`. - // It is incremented using Atomic::add() to prevent race conditions, and + // It is incremented using AtomicAccess::add() to prevent race conditions, and // is added to the `gc_total` CPUTimeType at the end of GC. volatile jlong _gc_total_cpu_time_diff; diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 693815c6fc477..d14db6ad0edda 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -61,7 +61,7 @@ #include "prims/jvmtiThreadState.hpp" #include "prims/methodHandles.hpp" #include "prims/vectorSupport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/basicLock.inline.hpp" #include "runtime/continuation.hpp" #include "runtime/continuationEntry.inline.hpp" @@ -135,7 +135,7 @@ void DeoptimizationScope::mark(nmethod* nm, bool inc_recompile_counts) { nmethod::DeoptimizationStatus status = inc_recompile_counts ? 
nmethod::deoptimize : nmethod::deoptimize_noupdate; - Atomic::store(&nm->_deoptimization_status, status); + AtomicAccess::store(&nm->_deoptimization_status, status); // Make sure active is not committed assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be"); @@ -1119,7 +1119,7 @@ template class Box static BoxCache* singleton(Thread* thread) { if (_singleton == nullptr) { BoxCache* s = new BoxCache(thread); - if (!Atomic::replace_if_null(&_singleton, s)) { + if (!AtomicAccess::replace_if_null(&_singleton, s)) { delete s; } } @@ -1182,7 +1182,7 @@ class BooleanBoxCache : public BoxCacheBase { static BooleanBoxCache* singleton(Thread* thread) { if (_singleton == nullptr) { BooleanBoxCache* s = new BooleanBoxCache(thread); - if (!Atomic::replace_if_null(&_singleton, s)) { + if (!AtomicAccess::replace_if_null(&_singleton, s)) { delete s; } } @@ -1971,7 +1971,7 @@ class DeoptActionSerializer : public JfrSerializer { static void register_serializers() { static int critical_section = 0; - if (1 == critical_section || Atomic::cmpxchg(&critical_section, 0, 1) == 1) { + if (1 == critical_section || AtomicAccess::cmpxchg(&critical_section, 0, 1) == 1) { return; } JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONREASON, true, new DeoptReasonSerializer()); diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp index f9e48b7f711ad..e9a00c2a3bc24 100644 --- a/src/hotspot/share/runtime/handshake.cpp +++ b/src/hotspot/share/runtime/handshake.cpp @@ -29,7 +29,7 @@ #include "logging/logStream.hpp" #include "memory/resourceArea.hpp" #include "prims/jvmtiThreadState.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/handshake.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -74,12 +74,12 @@ class HandshakeOperation : public CHeapObj { void prepare(JavaThread* current_target, Thread* executing_thread); void do_handshake(JavaThread* thread); bool is_completed() { - int32_t val = Atomic::load(&_pending_threads); + int32_t val = AtomicAccess::load(&_pending_threads); assert(val >= 0, "_pending_threads=%d cannot be negative", val); return val == 0; } - void add_target_count(int count) { Atomic::add(&_pending_threads, count); } - int32_t pending_threads() { return Atomic::load(&_pending_threads); } + void add_target_count(int count) { AtomicAccess::add(&_pending_threads, count); } + int32_t pending_threads() { return AtomicAccess::load(&_pending_threads); } const char* name() { return _handshake_cl->name(); } bool is_async() { return _handshake_cl->is_async(); } bool is_suspend() { return _handshake_cl->is_suspend(); } @@ -348,7 +348,7 @@ void HandshakeOperation::do_handshake(JavaThread* thread) { // here to make sure memory operations executed in the handshake // closure are visible to the VMThread/Handshaker after it reads // that the operation has completed. - Atomic::dec(&_pending_threads); + AtomicAccess::dec(&_pending_threads); // Trailing fence, used to make sure removal of the operation strictly // happened after we completed the operation. 
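HandshakeOperation above coordinates completion through the single _pending_threads counter: the requester adds the number of target threads up front, each thread that executes the handshake closure decrements once, and is_completed() just checks for zero under the fencing described in the comments. A small sketch of that counting pattern with std::atomic and invented names, not the actual HandshakeOperation code:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the _pending_threads counter used above.
    class CompletionTracker {
      std::atomic<int32_t> _pending{0};
    public:
      void add_targets(int32_t count) {   // requester registers its targets
        _pending.fetch_add(count, std::memory_order_relaxed);
      }
      void mark_done() {                  // each executing thread calls this once
        // Release ordering publishes this thread's work to whoever later sees
        // the counter hit zero through the acquire load in is_completed().
        int32_t prev = _pending.fetch_sub(1, std::memory_order_release);
        assert(prev > 0);                 // more completions than targets is a bug
        (void)prev;                       // silence unused-variable warnings
      }
      bool is_completed() const {
        return _pending.load(std::memory_order_acquire) == 0;
      }
    };
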
diff --git a/src/hotspot/share/runtime/handshake.hpp b/src/hotspot/share/runtime/handshake.hpp index c6f3aad08db19..1304dca12b7e8 100644 --- a/src/hotspot/share/runtime/handshake.hpp +++ b/src/hotspot/share/runtime/handshake.hpp @@ -108,7 +108,7 @@ class HandshakeState { HandshakeOperation* get_op(); void remove_op(HandshakeOperation* op); - void set_active_handshaker(Thread* thread) { Atomic::store(&_active_handshaker, thread); } + void set_active_handshaker(Thread* thread) { AtomicAccess::store(&_active_handshaker, thread); } class MatchOp { HandshakeOperation* _op; @@ -147,7 +147,7 @@ class HandshakeState { }; ProcessResult try_process(HandshakeOperation* match_op); - Thread* active_handshaker() const { return Atomic::load(&_active_handshaker); } + Thread* active_handshaker() const { return AtomicAccess::load(&_active_handshaker); } // Support for asynchronous exceptions private: diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp index 56e6ea30c0a9b..adc49f84358b3 100644 --- a/src/hotspot/share/runtime/init.cpp +++ b/src/hotspot/share/runtime/init.cpp @@ -36,7 +36,7 @@ #include "prims/downcallLinker.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/flags/jvmFlag.hpp" #include "runtime/globals.hpp" @@ -238,7 +238,7 @@ void exit_globals() { static volatile bool _init_completed = false; bool is_init_completed() { - return Atomic::load_acquire(&_init_completed); + return AtomicAccess::load_acquire(&_init_completed); } void wait_init_completed() { @@ -251,6 +251,6 @@ void wait_init_completed() { void set_init_completed() { assert(Universe::is_fully_initialized(), "Should have completed initialization"); MonitorLocker ml(InitCompleted_lock, Monitor::_no_safepoint_check_flag); - Atomic::release_store(&_init_completed, true); + AtomicAccess::release_store(&_init_completed, true); ml.notify_all(); } diff --git a/src/hotspot/share/runtime/interfaceSupport.cpp b/src/hotspot/share/runtime/interfaceSupport.cpp index 53216f14f2497..11a7d9fd41f07 100644 --- a/src/hotspot/share/runtime/interfaceSupport.cpp +++ b/src/hotspot/share/runtime/interfaceSupport.cpp @@ -26,7 +26,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index ba98f5928aad0..f5cd43b17695b 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -56,7 +56,7 @@ #include "prims/jvmtiDeferredUpdates.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/continuationEntry.inline.hpp" #include "runtime/continuationHelper.inline.hpp" @@ -1064,11 +1064,11 @@ JavaThread* JavaThread::active() { } oop JavaThread::exception_oop() const { - return Atomic::load(&_exception_oop); + return AtomicAccess::load(&_exception_oop); } void JavaThread::set_exception_oop(oop o) { - Atomic::store(&_exception_oop, o); + AtomicAccess::store(&_exception_oop, o); } void JavaThread::handle_special_runtime_exit_condition() { diff --git a/src/hotspot/share/runtime/javaThread.hpp 
b/src/hotspot/share/runtime/javaThread.hpp index 32003e1962e72..00bc59691960b 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -182,13 +182,13 @@ class JavaThread: public Thread { // For tracking the heavyweight monitor the thread is pending on. ObjectMonitor* current_pending_monitor() { - // Use Atomic::load() to prevent data race between concurrent modification and + // Use AtomicAccess::load() to prevent data race between concurrent modification and // concurrent readers, e.g. ThreadService::get_current_contended_monitor(). // Especially, reloading pointer from thread after null check must be prevented. - return Atomic::load(&_current_pending_monitor); + return AtomicAccess::load(&_current_pending_monitor); } void set_current_pending_monitor(ObjectMonitor* monitor) { - Atomic::store(&_current_pending_monitor, monitor); + AtomicAccess::store(&_current_pending_monitor, monitor); } void set_current_pending_monitor_is_from_java(bool from_java) { _current_pending_monitor_is_from_java = from_java; @@ -198,10 +198,10 @@ class JavaThread: public Thread { } ObjectMonitor* current_waiting_monitor() { // See the comment in current_pending_monitor() above. - return Atomic::load(&_current_waiting_monitor); + return AtomicAccess::load(&_current_waiting_monitor); } void set_current_waiting_monitor(ObjectMonitor* monitor) { - Atomic::store(&_current_waiting_monitor, monitor); + AtomicAccess::store(&_current_waiting_monitor, monitor); } // JNI handle support @@ -715,7 +715,7 @@ class JavaThread: public Thread { inline bool clear_carrier_thread_suspended(); bool is_carrier_thread_suspended() const { - return Atomic::load(&_carrier_thread_suspended); + return AtomicAccess::load(&_carrier_thread_suspended); } bool is_in_VTMS_transition() const { return _is_in_VTMS_transition; } @@ -727,8 +727,8 @@ class JavaThread: public Thread { bool is_in_java_upcall() const { return _is_in_java_upcall; } void toggle_is_in_java_upcall() { _is_in_java_upcall = !_is_in_java_upcall; }; - bool VTMS_transition_mark() const { return Atomic::load(&_VTMS_transition_mark); } - void set_VTMS_transition_mark(bool val) { Atomic::store(&_VTMS_transition_mark, val); } + bool VTMS_transition_mark() const { return AtomicAccess::load(&_VTMS_transition_mark); } + void set_VTMS_transition_mark(bool val) { AtomicAccess::store(&_VTMS_transition_mark, val); } // Temporarily skip posting JVMTI events for safety reasons when executions is in a critical section: // - is in a VTMS transition (_is_in_VTMS_transition) @@ -943,7 +943,7 @@ class JavaThread: public Thread { } // Atomic version; invoked by a thread other than the owning thread. - bool in_critical_atomic() { return Atomic::load(&_jni_active_critical) > 0; } + bool in_critical_atomic() { return AtomicAccess::load(&_jni_active_critical) > 0; } // Checked JNI: is the programmer required to check for exceptions, if so specify // which function name. 
Returning to a Java frame should implicitly clear the diff --git a/src/hotspot/share/runtime/javaThread.inline.hpp b/src/hotspot/share/runtime/javaThread.inline.hpp index be76407f511da..bffacef24d00b 100644 --- a/src/hotspot/share/runtime/javaThread.inline.hpp +++ b/src/hotspot/share/runtime/javaThread.inline.hpp @@ -33,7 +33,7 @@ #include "memory/universe.hpp" #include "oops/instanceKlass.hpp" #include "oops/oopHandle.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuation.hpp" #include "runtime/continuationEntry.inline.hpp" #include "runtime/lockStack.inline.hpp" @@ -47,14 +47,14 @@ inline void JavaThread::set_suspend_flag(SuspendFlags f) { do { flags = _suspend_flags; } - while (Atomic::cmpxchg(&_suspend_flags, flags, (flags | f)) != flags); + while (AtomicAccess::cmpxchg(&_suspend_flags, flags, (flags | f)) != flags); } inline void JavaThread::clear_suspend_flag(SuspendFlags f) { uint32_t flags; do { flags = _suspend_flags; } - while (Atomic::cmpxchg(&_suspend_flags, flags, (flags & ~f)) != flags); + while (AtomicAccess::cmpxchg(&_suspend_flags, flags, (flags & ~f)) != flags); } inline void JavaThread::set_obj_deopt_flag() { @@ -66,10 +66,10 @@ inline void JavaThread::clear_obj_deopt_flag() { #if INCLUDE_JVMTI inline bool JavaThread::set_carrier_thread_suspended() { - return Atomic::cmpxchg(&_carrier_thread_suspended, false, true) == false; + return AtomicAccess::cmpxchg(&_carrier_thread_suspended, false, true) == false; } inline bool JavaThread::clear_carrier_thread_suspended() { - return Atomic::cmpxchg(&_carrier_thread_suspended, true, false) == true; + return AtomicAccess::cmpxchg(&_carrier_thread_suspended, true, false) == true; } #endif @@ -137,9 +137,9 @@ inline JavaThreadState JavaThread::thread_state() const { #if defined(PPC64) || defined (AARCH64) || defined(RISCV64) // Use membars when accessing volatile _thread_state. See // Threads::create_vm() for size checks. - return Atomic::load_acquire(&_thread_state); + return AtomicAccess::load_acquire(&_thread_state); #else - return Atomic::load(&_thread_state); + return AtomicAccess::load(&_thread_state); #endif } @@ -149,9 +149,9 @@ inline void JavaThread::set_thread_state(JavaThreadState s) { #if defined(PPC64) || defined (AARCH64) || defined(RISCV64) // Use membars when accessing volatile _thread_state. See // Threads::create_vm() for size checks. 
- Atomic::release_store(&_thread_state, s); + AtomicAccess::release_store(&_thread_state, s); #else - Atomic::store(&_thread_state, s); + AtomicAccess::store(&_thread_state, s); #endif } @@ -198,25 +198,25 @@ inline void JavaThread::set_done_attaching_via_jni() { } inline bool JavaThread::is_exiting() const { - TerminatedTypes l_terminated = Atomic::load_acquire(&_terminated); + TerminatedTypes l_terminated = AtomicAccess::load_acquire(&_terminated); return l_terminated == _thread_exiting || l_terminated == _thread_gc_barrier_detached || check_is_terminated(l_terminated); } inline bool JavaThread::is_oop_safe() const { - TerminatedTypes l_terminated = Atomic::load_acquire(&_terminated); + TerminatedTypes l_terminated = AtomicAccess::load_acquire(&_terminated); return l_terminated != _thread_gc_barrier_detached && !check_is_terminated(l_terminated); } inline bool JavaThread::is_terminated() const { - TerminatedTypes l_terminated = Atomic::load_acquire(&_terminated); + TerminatedTypes l_terminated = AtomicAccess::load_acquire(&_terminated); return check_is_terminated(l_terminated); } inline void JavaThread::set_terminated(TerminatedTypes t) { - Atomic::release_store(&_terminated, t); + AtomicAccess::release_store(&_terminated, t); } inline bool JavaThread::is_active_Java_thread() const { diff --git a/src/hotspot/share/runtime/jniHandles.cpp b/src/hotspot/share/runtime/jniHandles.cpp index e7564467a812f..807661a66d24d 100644 --- a/src/hotspot/share/runtime/jniHandles.cpp +++ b/src/hotspot/share/runtime/jniHandles.cpp @@ -335,7 +335,7 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType } else { block = new JNIHandleBlock(); } - Atomic::inc(&_blocks_allocated); + AtomicAccess::inc(&_blocks_allocated); block->zap(); } block->_top = 0; @@ -372,7 +372,7 @@ void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) { DEBUG_ONLY(block->set_pop_frame_link(nullptr)); while (block != nullptr) { JNIHandleBlock* next = block->_next; - Atomic::dec(&_blocks_allocated); + AtomicAccess::dec(&_blocks_allocated); assert(block->pop_frame_link() == nullptr, "pop_frame_link should be null"); delete block; block = next; diff --git a/src/hotspot/share/runtime/lightweightSynchronizer.cpp b/src/hotspot/share/runtime/lightweightSynchronizer.cpp index 6cec5c80982e4..ebb14490365b3 100644 --- a/src/hotspot/share/runtime/lightweightSynchronizer.cpp +++ b/src/hotspot/share/runtime/lightweightSynchronizer.cpp @@ -29,7 +29,7 @@ #include "memory/resourceArea.hpp" #include "nmt/memTag.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/basicLock.inline.hpp" #include "runtime/globals_extension.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -115,15 +115,15 @@ class ObjectMonitorTable : AllStatic { }; static void inc_items_count() { - Atomic::inc(&_items_count, memory_order_relaxed); + AtomicAccess::inc(&_items_count, memory_order_relaxed); } static void dec_items_count() { - Atomic::dec(&_items_count, memory_order_relaxed); + AtomicAccess::dec(&_items_count, memory_order_relaxed); } static double get_load_factor() { - size_t count = Atomic::load(&_items_count); + size_t count = AtomicAccess::load(&_items_count); return (double)count / (double)_table_size; } @@ -195,8 +195,8 @@ class ObjectMonitorTable : AllStatic { } static void try_notify_grow() { - if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) { - Atomic::store(&_resize, true); + if (!_table->is_max_size_reached() && 
!AtomicAccess::load(&_resize)) { + AtomicAccess::store(&_resize, true); if (Service_lock->try_lock()) { Service_lock->notify(); Service_lock->unlock(); @@ -216,7 +216,7 @@ class ObjectMonitorTable : AllStatic { } static bool should_resize() { - return should_grow() || should_shrink() || Atomic::load(&_resize); + return should_grow() || should_shrink() || AtomicAccess::load(&_resize); } template @@ -265,14 +265,14 @@ class ObjectMonitorTable : AllStatic { lt.print("Start growing with load factor %f", get_load_factor()); success = grow(current); } else { - if (!_table->is_max_size_reached() && Atomic::load(&_resize)) { + if (!_table->is_max_size_reached() && AtomicAccess::load(&_resize)) { lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor()); } lt.print("Start cleaning with load factor %f", get_load_factor()); success = clean(current); } - Atomic::store(&_resize, false); + AtomicAccess::store(&_resize, false); return success; } diff --git a/src/hotspot/share/runtime/mutex.hpp b/src/hotspot/share/runtime/mutex.hpp index c0688c573af88..cf2b222d2daa0 100644 --- a/src/hotspot/share/runtime/mutex.hpp +++ b/src/hotspot/share/runtime/mutex.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_RUNTIME_MUTEX_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/semaphore.hpp" #if defined(LINUX) || defined(AIX) || defined(BSD) @@ -104,7 +104,7 @@ class Mutex : public CHeapObj { // the low-level _lock, or to null before it has released the _lock. Accesses by any thread other // than the lock owner are inherently racy. Thread* volatile _owner; - void raw_set_owner(Thread* new_owner) { Atomic::store(&_owner, new_owner); } + void raw_set_owner(Thread* new_owner) { AtomicAccess::store(&_owner, new_owner); } protected: // Monitor-Mutex metadata PlatformMonitor _lock; // Native monitor implementation @@ -202,7 +202,7 @@ class Mutex : public CHeapObj { // Current owner - note not MT-safe. 
Can only be used to guarantee that // the current running thread owns the lock - Thread* owner() const { return Atomic::load(&_owner); } + Thread* owner() const { return AtomicAccess::load(&_owner); } void set_owner(Thread* owner) { set_owner_implementation(owner); } bool owned_by_self() const; diff --git a/src/hotspot/share/runtime/nonJavaThread.cpp b/src/hotspot/share/runtime/nonJavaThread.cpp index 71c44d2a83b2f..2fb4c2dce022f 100644 --- a/src/hotspot/share/runtime/nonJavaThread.cpp +++ b/src/hotspot/share/runtime/nonJavaThread.cpp @@ -25,7 +25,7 @@ #include "gc/shared/barrierSet.hpp" #include "gc/shared/gcId.hpp" #include "jvm_io.h" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/jniHandles.hpp" #include "runtime/mutexLocker.hpp" @@ -54,7 +54,7 @@ NonJavaThread::List NonJavaThread::_the_list; NonJavaThread::Iterator::Iterator() : _protect_enter(_the_list._protect.enter()), - _current(Atomic::load_acquire(&_the_list._head)) + _current(AtomicAccess::load_acquire(&_the_list._head)) {} NonJavaThread::Iterator::~Iterator() { @@ -63,7 +63,7 @@ NonJavaThread::Iterator::~Iterator() { void NonJavaThread::Iterator::step() { assert(!end(), "precondition"); - _current = Atomic::load_acquire(&_current->_next); + _current = AtomicAccess::load_acquire(&_current->_next); } NonJavaThread::NonJavaThread() : Thread(), _next(nullptr) { @@ -76,8 +76,8 @@ void NonJavaThread::add_to_the_list() { MutexLocker ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag); // Initialize BarrierSet-related data before adding to list. BarrierSet::barrier_set()->on_thread_attach(this); - Atomic::release_store(&_next, _the_list._head); - Atomic::release_store(&_the_list._head, this); + AtomicAccess::release_store(&_next, _the_list._head); + AtomicAccess::release_store(&_the_list._head, this); } void NonJavaThread::remove_from_the_list() { diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp index c1febe7a28c03..8859f6e7f5f29 100644 --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -37,7 +37,7 @@ #include "oops/weakHandle.inline.hpp" #include "prims/jvmtiDeferredUpdates.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/continuationWrapper.inline.hpp" #include "runtime/globals.hpp" #include "runtime/handles.inline.hpp" @@ -695,9 +695,9 @@ void ObjectMonitor::add_to_entry_list(JavaThread* current, ObjectWaiter* node) { node->TState = ObjectWaiter::TS_ENTER; for (;;) { - ObjectWaiter* head = Atomic::load(&_entry_list); + ObjectWaiter* head = AtomicAccess::load(&_entry_list); node->_next = head; - if (Atomic::cmpxchg(&_entry_list, head, node) == head) { + if (AtomicAccess::cmpxchg(&_entry_list, head, node) == head) { return; } } @@ -714,9 +714,9 @@ bool ObjectMonitor::try_lock_or_add_to_entry_list(JavaThread* current, ObjectWai node->TState = ObjectWaiter::TS_ENTER; for (;;) { - ObjectWaiter* head = Atomic::load(&_entry_list); + ObjectWaiter* head = AtomicAccess::load(&_entry_list); node->_next = head; - if (Atomic::cmpxchg(&_entry_list, head, node) == head) { + if (AtomicAccess::cmpxchg(&_entry_list, head, node) == head) { return false; } @@ -805,7 +805,7 @@ bool ObjectMonitor::deflate_monitor(Thread* current) { // Make a zero contentions field negative to force any contending threads // to retry. This is the second part of the async deflation dance. 
- if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) { + if (AtomicAccess::cmpxchg(&_contentions, 0, INT_MIN) != 0) { // Contentions was no longer 0 so we lost the race since the // ObjectMonitor is now busy. Restore owner to null if it is // still DEFLATER_MARKER: @@ -822,7 +822,7 @@ bool ObjectMonitor::deflate_monitor(Thread* current) { guarantee(contentions() < 0, "must be negative: contentions=%d", contentions()); guarantee(_waiters == 0, "must be 0: waiters=%d", _waiters); - ObjectWaiter* w = Atomic::load(&_entry_list); + ObjectWaiter* w = AtomicAccess::load(&_entry_list); guarantee(w == nullptr, "must be no entering threads: entry_list=" INTPTR_FORMAT, p2i(w)); @@ -1269,7 +1269,7 @@ void ObjectMonitor::entry_list_build_dll(JavaThread* current) { ObjectWaiter* prev = nullptr; // Need acquire here to match the implicit release of the cmpxchg // that updated entry_list, so we can access w->prev(). - ObjectWaiter* w = Atomic::load_acquire(&_entry_list); + ObjectWaiter* w = AtomicAccess::load_acquire(&_entry_list); assert(w != nullptr, "should only be called when entry list is not empty"); while (w != nullptr) { assert(w->TState == ObjectWaiter::TS_ENTER, "invariant"); @@ -1338,10 +1338,10 @@ void ObjectMonitor::unlink_after_acquire(JavaThread* current, ObjectWaiter* curr if (currentNode->next() == nullptr) { assert(_entry_list_tail == nullptr || _entry_list_tail == currentNode, "invariant"); - ObjectWaiter* w = Atomic::load(&_entry_list); + ObjectWaiter* w = AtomicAccess::load(&_entry_list); if (w == currentNode) { // The currentNode is the only element in _entry_list. - if (Atomic::cmpxchg(&_entry_list, w, (ObjectWaiter*)nullptr) == w) { + if (AtomicAccess::cmpxchg(&_entry_list, w, (ObjectWaiter*)nullptr) == w) { _entry_list_tail = nullptr; currentNode->set_bad_pointers(); return; @@ -1378,13 +1378,13 @@ void ObjectMonitor::unlink_after_acquire(JavaThread* current, ObjectWaiter* curr // _entry_list. If we are the head then we try to remove ourselves, // else we convert to the doubly linked list. if (currentNode->prev() == nullptr) { - ObjectWaiter* w = Atomic::load(&_entry_list); + ObjectWaiter* w = AtomicAccess::load(&_entry_list); assert(w != nullptr, "invariant"); if (w == currentNode) { ObjectWaiter* next = currentNode->next(); // currentNode is at the head of _entry_list. - if (Atomic::cmpxchg(&_entry_list, w, next) == w) { + if (AtomicAccess::cmpxchg(&_entry_list, w, next) == w) { // The CAS above sucsessfully unlinked currentNode from the // head of the _entry_list. assert(_entry_list != w, "invariant"); @@ -1511,7 +1511,7 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { // possible, so that the successor can acquire the lock. If there is // no successor, we might need to wake up a waiting thread. if (!has_successor()) { - ObjectWaiter* w = Atomic::load(&_entry_list); + ObjectWaiter* w = AtomicAccess::load(&_entry_list); if (w != nullptr) { // Other threads are blocked trying to acquire the lock and // there is no successor, so it appears that an heir- diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp index ab315ba0fc59f..3c925928be251 100644 --- a/src/hotspot/share/runtime/objectMonitor.hpp +++ b/src/hotspot/share/runtime/objectMonitor.hpp @@ -281,7 +281,7 @@ class ObjectMonitor : public CHeapObj { // Same as above but uses owner_id of current as new value. 
void set_owner_from(int64_t old_value, JavaThread* current); // Try to set _owner field to new_value if the current value matches - // old_value, using Atomic::cmpxchg(). Otherwise, does not change the + // old_value, using AtomicAccess::cmpxchg(). Otherwise, does not change the // _owner field. Returns the prior value of the _owner field. int64_t try_set_owner_from_raw(int64_t old_value, int64_t new_value); // Same as above but uses owner_id of current as new_value. diff --git a/src/hotspot/share/runtime/objectMonitor.inline.hpp b/src/hotspot/share/runtime/objectMonitor.inline.hpp index 05ad06c9bf325..52ef904b404d5 100644 --- a/src/hotspot/share/runtime/objectMonitor.inline.hpp +++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp @@ -31,7 +31,7 @@ #include "logging/log.hpp" #include "oops/access.inline.hpp" #include "oops/markWord.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/lockStack.inline.hpp" @@ -60,11 +60,11 @@ inline bool ObjectMonitor::is_entered(JavaThread* current) const { } inline uintptr_t ObjectMonitor::metadata() const { - return Atomic::load(&_metadata); + return AtomicAccess::load(&_metadata); } inline void ObjectMonitor::set_metadata(uintptr_t value) { - Atomic::store(&_metadata, value); + AtomicAccess::store(&_metadata, value); } inline volatile uintptr_t* ObjectMonitor::metadata_addr() { @@ -109,7 +109,7 @@ inline int64_t ObjectMonitor::owner() const { } inline int64_t ObjectMonitor::owner_raw() const { - return Atomic::load(&_owner); + return AtomicAccess::load(&_owner); } // Returns true if owner field == DEFLATER_MARKER and false otherwise. @@ -124,12 +124,12 @@ inline bool ObjectMonitor::is_being_async_deflated() { // Return number of threads contending for this monitor. inline int ObjectMonitor::contentions() const { - return Atomic::load(&_contentions); + return AtomicAccess::load(&_contentions); } // Add value to the contentions field. inline void ObjectMonitor::add_to_contentions(int value) { - Atomic::add(&_contentions, value); + AtomicAccess::add(&_contentions, value); } inline void ObjectMonitor::set_recursions(size_t recursions) { @@ -147,11 +147,11 @@ inline void ObjectMonitor::increment_recursions(JavaThread* current) { inline void ObjectMonitor::release_clear_owner(JavaThread* old_owner) { int64_t old_value = owner_id_from(old_owner); #ifdef ASSERT - int64_t prev = Atomic::load(&_owner); + int64_t prev = AtomicAccess::load(&_owner); assert(prev == old_value, "unexpected prev owner=" INT64_FORMAT ", expected=" INT64_FORMAT, prev, old_value); #endif - Atomic::release_store(&_owner, NO_OWNER); + AtomicAccess::release_store(&_owner, NO_OWNER); log_trace(monitorinflation, owner)("release_clear_owner(): mid=" INTPTR_FORMAT ", old_value=" INT64_FORMAT, p2i(this), old_value); @@ -161,12 +161,12 @@ inline void ObjectMonitor::release_clear_owner(JavaThread* old_owner) { // (Simple means no memory sync needed.) 
inline void ObjectMonitor::set_owner_from_raw(int64_t old_value, int64_t new_value) { #ifdef ASSERT - int64_t prev = Atomic::load(&_owner); + int64_t prev = AtomicAccess::load(&_owner); assert((int64_t)prev < ThreadIdentifier::current(), "must be reasonable"); assert(prev == old_value, "unexpected prev owner=" INT64_FORMAT ", expected=" INT64_FORMAT, prev, old_value); #endif - Atomic::store(&_owner, new_value); + AtomicAccess::store(&_owner, new_value); log_trace(monitorinflation, owner)("set_owner_from(): mid=" INTPTR_FORMAT ", old_value=" INT64_FORMAT ", new_value=" INT64_FORMAT, p2i(this), @@ -182,7 +182,7 @@ inline void ObjectMonitor::set_owner_from(int64_t old_value, JavaThread* current // the prior value of the _owner field. inline int64_t ObjectMonitor::try_set_owner_from_raw(int64_t old_value, int64_t new_value) { assert((int64_t)new_value < ThreadIdentifier::current(), "must be reasonable"); - int64_t prev = Atomic::cmpxchg(&_owner, old_value, new_value); + int64_t prev = AtomicAccess::cmpxchg(&_owner, old_value, new_value); if (prev == old_value) { log_trace(monitorinflation, owner)("try_set_owner_from(): mid=" INTPTR_FORMAT ", prev=" INT64_FORMAT @@ -197,27 +197,27 @@ inline int64_t ObjectMonitor::try_set_owner_from(int64_t old_value, JavaThread* } inline bool ObjectMonitor::has_successor() const { - return Atomic::load(&_succ) != NO_OWNER; + return AtomicAccess::load(&_succ) != NO_OWNER; } inline bool ObjectMonitor::has_successor(JavaThread* thread) const { - return owner_id_from(thread) == Atomic::load(&_succ); + return owner_id_from(thread) == AtomicAccess::load(&_succ); } inline void ObjectMonitor::set_successor(JavaThread* thread) { - Atomic::store(&_succ, owner_id_from(thread)); + AtomicAccess::store(&_succ, owner_id_from(thread)); } inline void ObjectMonitor::set_successor(oop vthread) { - Atomic::store(&_succ, java_lang_Thread::thread_id(vthread)); + AtomicAccess::store(&_succ, java_lang_Thread::thread_id(vthread)); } inline void ObjectMonitor::clear_successor() { - Atomic::store(&_succ, NO_OWNER); + AtomicAccess::store(&_succ, NO_OWNER); } inline int64_t ObjectMonitor::successor() const { - return Atomic::load(&_succ); + return AtomicAccess::load(&_succ); } // The _next_om field can be concurrently read and modified so we @@ -226,12 +226,12 @@ inline int64_t ObjectMonitor::successor() const { // Simply get _next_om field. inline ObjectMonitor* ObjectMonitor::next_om() const { - return Atomic::load(&_next_om); + return AtomicAccess::load(&_next_om); } // Simply set _next_om field to new_value. inline void ObjectMonitor::set_next_om(ObjectMonitor* new_value) { - Atomic::store(&_next_om, new_value); + AtomicAccess::store(&_next_om, new_value); } // Block out deflation. 
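The ObjectMonitor changes above all lean on the same compare-and-exchange contract spelled out in the objectMonitor.hpp comment: cmpxchg(addr, old_value, new_value) installs new_value only if the field currently holds old_value, never modifies it otherwise, and always returns the prior value, so the caller detects success by comparing that return value with old_value. A minimal standalone sketch of that contract, using std::atomic in place of HotSpot's AtomicAccess; the owner variable, NO_OWNER constant and thread ids below are illustrative stand-ins, not the real ObjectMonitor fields:

#include <atomic>
#include <cassert>
#include <cstdint>

constexpr int64_t NO_OWNER = 0;                 // illustrative stand-in
static std::atomic<int64_t> owner{NO_OWNER};    // plays the role of _owner

// Mirrors the try_set_owner_from_raw() contract: attempt the swap and
// return the prior value; the caller decides success by comparing the
// result against the value it expected to find.
int64_t try_set_owner_from_raw(int64_t old_value, int64_t new_value) {
  int64_t expected = old_value;
  // On failure compare_exchange_strong writes the observed value back
  // into 'expected', which is exactly the "prior value" to return.
  owner.compare_exchange_strong(expected, new_value);
  return expected;
}

int main() {
  assert(try_set_owner_from_raw(NO_OWNER, 42) == NO_OWNER); // won: prior was NO_OWNER
  assert(try_set_owner_from_raw(NO_OWNER, 7) == 42);        // lost: prior owner returned
  return 0;
}

The same return-the-prior-value shape is what the retry loops elsewhere in this patch (entry-list pushes, suspend-flag updates, max trackers) are built on.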
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index 0f61770a6394b..4f00ff1726913 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -50,7 +50,7 @@ #include "prims/jvmtiAgent.hpp" #include "prims/jvmtiAgentList.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -827,7 +827,7 @@ int os::random() { while (true) { unsigned int seed = _rand_seed; unsigned int rand = next_random(seed); - if (Atomic::cmpxchg(&_rand_seed, seed, rand, memory_order_relaxed) == seed) { + if (AtomicAccess::cmpxchg(&_rand_seed, seed, rand, memory_order_relaxed) == seed) { return static_cast(rand); } } @@ -1576,12 +1576,12 @@ void os::read_image_release_file() { tmp[i] = ' '; } } - Atomic::release_store(&_image_release_file_content, tmp); + AtomicAccess::release_store(&_image_release_file_content, tmp); fclose(file); } void os::print_image_release_file(outputStream* st) { - char* ifrc = Atomic::load_acquire(&_image_release_file_content); + char* ifrc = AtomicAccess::load_acquire(&_image_release_file_content); if (ifrc != nullptr) { st->print_cr("%s", ifrc); } else { @@ -2346,7 +2346,7 @@ void os::pretouch_memory(void* start, void* end, size_t page_size) { // avoid overflow if the last page abuts the end of the address range. last = align_down(static_cast(end) - 1, pd_page_size); for (char* cur = static_cast(first); /* break */; cur += pd_page_size) { - Atomic::add(reinterpret_cast(cur), 0, memory_order_relaxed); + AtomicAccess::add(reinterpret_cast(cur), 0, memory_order_relaxed); if (cur >= last) break; } } diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index 9db4380fc077c..d06ce0e322bbc 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -1106,7 +1106,7 @@ class os: AllStatic { }; // Note that "PAUSE" is almost always used with synchronization -// so arguably we should provide Atomic::SpinPause() instead +// so arguably we should provide AtomicAccess::SpinPause() instead // of the global SpinPause() with C linkage. // It'd also be eligible for inlining on many platforms. diff --git a/src/hotspot/share/runtime/perfData.cpp b/src/hotspot/share/runtime/perfData.cpp index bc9d86400dcf5..7532ada8f5aba 100644 --- a/src/hotspot/share/runtime/perfData.cpp +++ b/src/hotspot/share/runtime/perfData.cpp @@ -250,7 +250,7 @@ void PerfDataManager::destroy() { // counter users that we are at shutdown; b) sync up with current users, waiting // for them to finish with counters. 
// - Atomic::store(&_has_PerfData, false); + AtomicAccess::store(&_has_PerfData, false); GlobalCounter::write_synchronize(); log_debug(perf, datacreation)("Total = %d, Constants = %d", @@ -276,7 +276,7 @@ void PerfDataManager::add_item(PerfData* p) { // Default sizes determined using -Xlog:perf+datacreation=debug if (_all == nullptr) { _all = new PerfDataList(191); - Atomic::release_store(&_has_PerfData, true); + AtomicAccess::release_store(&_has_PerfData, true); } assert(!_all->contains(p->name()), "duplicate name added: %s", p->name()); diff --git a/src/hotspot/share/runtime/perfData.hpp b/src/hotspot/share/runtime/perfData.hpp index 7f05e4673fce5..ca1f1f410ad64 100644 --- a/src/hotspot/share/runtime/perfData.hpp +++ b/src/hotspot/share/runtime/perfData.hpp @@ -26,7 +26,7 @@ #define SHARE_RUNTIME_PERFDATA_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/perfDataTypes.hpp" #include "runtime/perfMemory.hpp" #include "runtime/timer.hpp" @@ -697,7 +697,7 @@ class PerfDataManager : AllStatic { } static void destroy(); - static bool has_PerfData() { return Atomic::load_acquire(&_has_PerfData); } + static bool has_PerfData() { return AtomicAccess::load_acquire(&_has_PerfData); } }; // Useful macros to create the performance counters diff --git a/src/hotspot/share/runtime/perfMemory.cpp b/src/hotspot/share/runtime/perfMemory.cpp index 1122aa8da471d..a75a41e95a94c 100644 --- a/src/hotspot/share/runtime/perfMemory.cpp +++ b/src/hotspot/share/runtime/perfMemory.cpp @@ -26,7 +26,7 @@ #include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/java.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" @@ -154,7 +154,7 @@ void PerfMemory::initialize() { _prologue->overflow = 0; _prologue->mod_time_stamp = 0; - Atomic::release_store(&_initialized, 1); + AtomicAccess::release_store(&_initialized, 1); } void PerfMemory::destroy() { @@ -267,5 +267,5 @@ char* PerfMemory::get_perfdata_file_path() { } bool PerfMemory::is_initialized() { - return Atomic::load_acquire(&_initialized) != 0; + return AtomicAccess::load_acquire(&_initialized) != 0; } diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index ef9e0981913b7..a1922612bd685 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -41,7 +41,7 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" #include "runtime/globals.hpp" @@ -306,7 +306,7 @@ void SafepointSynchronize::arm_safepoint() { assert((_safepoint_counter & 0x1) == 0, "must be even"); // The store to _safepoint_counter must happen after any stores in arming. - Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1); + AtomicAccess::release_store(&_safepoint_counter, _safepoint_counter + 1); // We are synchronizing OrderAccess::storestore(); // Ordered with _safepoint_counter @@ -441,7 +441,7 @@ void SafepointSynchronize::disarm_safepoint() { // Set the next dormant (even) safepoint id. 
assert((_safepoint_counter & 0x1) == 1, "must be odd"); - Atomic::release_store(&_safepoint_counter, _safepoint_counter + 1); + AtomicAccess::release_store(&_safepoint_counter, _safepoint_counter + 1); OrderAccess::fence(); // Keep the local state from floating up. @@ -609,7 +609,7 @@ void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) { MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, thread)); if (log_is_enabled(Info, safepoint, stats)) { - Atomic::inc(&_nof_threads_hit_polling_page); + AtomicAccess::inc(&_nof_threads_hit_polling_page); } ThreadSafepointState* state = thread->safepoint_state(); @@ -685,15 +685,15 @@ void ThreadSafepointState::destroy(JavaThread *thread) { } uint64_t ThreadSafepointState::get_safepoint_id() const { - return Atomic::load_acquire(&_safepoint_id); + return AtomicAccess::load_acquire(&_safepoint_id); } void ThreadSafepointState::reset_safepoint_id() { - Atomic::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter); + AtomicAccess::release_store(&_safepoint_id, SafepointSynchronize::InactiveSafepointCounter); } void ThreadSafepointState::set_safepoint_id(uint64_t safepoint_id) { - Atomic::release_store(&_safepoint_id, safepoint_id); + AtomicAccess::release_store(&_safepoint_id, safepoint_id); } void ThreadSafepointState::examine_state_of_thread(uint64_t safepoint_count) { diff --git a/src/hotspot/share/runtime/safepointMechanism.inline.hpp b/src/hotspot/share/runtime/safepointMechanism.inline.hpp index 6b848ee6af99a..e2e29c7af1e0a 100644 --- a/src/hotspot/share/runtime/safepointMechanism.inline.hpp +++ b/src/hotspot/share/runtime/safepointMechanism.inline.hpp @@ -27,7 +27,7 @@ #include "runtime/safepointMechanism.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handshake.hpp" #include "runtime/safepoint.hpp" #include "runtime/stackWatermarkSet.hpp" @@ -37,18 +37,18 @@ // Caller is responsible for using a memory barrier if needed. inline void SafepointMechanism::ThreadData::set_polling_page(uintptr_t poll_value) { - Atomic::store(&_polling_page, poll_value); + AtomicAccess::store(&_polling_page, poll_value); } // Caller is responsible for using a memory barrier if needed. inline void SafepointMechanism::ThreadData::set_polling_word(uintptr_t poll_value) { - Atomic::store(&_polling_word, poll_value); + AtomicAccess::store(&_polling_word, poll_value); } // The acquire makes sure reading of polling page is done before // the reading the handshake operation or the global state inline uintptr_t SafepointMechanism::ThreadData::get_polling_word() { - return Atomic::load_acquire(&_polling_word); + return AtomicAccess::load_acquire(&_polling_word); } bool SafepointMechanism::local_poll_armed(JavaThread* thread) { diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 710d34c3ccb23..b7ad8081c5287 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -58,7 +58,7 @@ #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/basicLock.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" @@ -896,7 +896,7 @@ void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool de // bindings. 
current->clear_scopedValueBindings(); // Increment counter for hs_err file reporting - Atomic::inc(&Exceptions::_stack_overflow_errors); + AtomicAccess::inc(&Exceptions::_stack_overflow_errors); throw_and_post_jvmti_exception(current, exception); } @@ -1376,7 +1376,7 @@ methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, T uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) : (is_virtual) ? (&_resolve_virtual_ctr) : (&_resolve_static_ctr); - Atomic::inc(addr); + AtomicAccess::inc(addr); if (TraceCallFixup) { ResourceMark rm(current); @@ -1602,7 +1602,7 @@ methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) { methodHandle callee_method(current, call_info.selected_method()); #ifndef PRODUCT - Atomic::inc(&_ic_miss_ctr); + AtomicAccess::inc(&_ic_miss_ctr); // Statistics & Tracing if (TraceCallFixup) { @@ -1728,7 +1728,7 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) { #ifndef PRODUCT - Atomic::inc(&_wrong_method_ctr); + AtomicAccess::inc(&_wrong_method_ctr); if (TraceCallFixup) { ResourceMark rm(current); diff --git a/src/hotspot/share/runtime/stackWatermark.cpp b/src/hotspot/share/runtime/stackWatermark.cpp index 4c3533cb91f71..97d6581f08bbf 100644 --- a/src/hotspot/share/runtime/stackWatermark.cpp +++ b/src/hotspot/share/runtime/stackWatermark.cpp @@ -23,7 +23,7 @@ */ #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.inline.hpp" #include "runtime/javaThread.hpp" #include "runtime/osThread.hpp" @@ -186,7 +186,7 @@ void StackWatermark::assert_is_frame_safe(const frame& f) { // without going through any hooks. bool StackWatermark::is_frame_safe(const frame& f) { assert(_lock.owned_by_self(), "Must be locked"); - uint32_t state = Atomic::load(&_state); + uint32_t state = AtomicAccess::load(&_state); if (!processing_started(state)) { return false; } @@ -231,11 +231,11 @@ void StackWatermark::update_watermark() { assert(_lock.owned_by_self(), "invariant"); if (_iterator != nullptr && _iterator->has_next()) { assert(_iterator->callee() != 0, "sanity"); - Atomic::release_store(&_watermark, _iterator->callee()); - Atomic::release_store(&_state, StackWatermarkState::create(epoch_id(), false /* is_done */)); // release watermark w.r.t. epoch + AtomicAccess::release_store(&_watermark, _iterator->callee()); + AtomicAccess::release_store(&_state, StackWatermarkState::create(epoch_id(), false /* is_done */)); // release watermark w.r.t. epoch } else { - Atomic::release_store(&_watermark, uintptr_t(0)); // Release stack data modifications w.r.t. watermark - Atomic::release_store(&_state, StackWatermarkState::create(epoch_id(), true /* is_done */)); // release watermark w.r.t. epoch + AtomicAccess::release_store(&_watermark, uintptr_t(0)); // Release stack data modifications w.r.t. watermark + AtomicAccess::release_store(&_state, StackWatermarkState::create(epoch_id(), true /* is_done */)); // release watermark w.r.t. 
epoch log_info(stackbarrier)("Finished stack processing iteration for tid %d", _jt->osthread()->thread_id()); } @@ -263,7 +263,7 @@ void StackWatermark::pop_linked_watermark() { } uintptr_t StackWatermark::watermark() { - return Atomic::load_acquire(&_watermark); + return AtomicAccess::load_acquire(&_watermark); } uintptr_t StackWatermark::last_processed() { @@ -284,19 +284,19 @@ uintptr_t StackWatermark::last_processed_raw() { } bool StackWatermark::processing_started() const { - return processing_started(Atomic::load(&_state)); + return processing_started(AtomicAccess::load(&_state)); } bool StackWatermark::processing_started_acquire() const { - return processing_started(Atomic::load_acquire(&_state)); + return processing_started(AtomicAccess::load_acquire(&_state)); } bool StackWatermark::processing_completed() const { - return processing_completed(Atomic::load(&_state)); + return processing_completed(AtomicAccess::load(&_state)); } bool StackWatermark::processing_completed_acquire() const { - return processing_completed(Atomic::load_acquire(&_state)); + return processing_completed(AtomicAccess::load_acquire(&_state)); } void StackWatermark::process_linked_watermarks() { diff --git a/src/hotspot/share/runtime/stackWatermarkSet.cpp b/src/hotspot/share/runtime/stackWatermarkSet.cpp index 528bf767be1a1..9a2b087f185c5 100644 --- a/src/hotspot/share/runtime/stackWatermarkSet.cpp +++ b/src/hotspot/share/runtime/stackWatermarkSet.cpp @@ -23,7 +23,7 @@ */ #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/frame.inline.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/safepoint.hpp" diff --git a/src/hotspot/share/runtime/suspendResumeManager.cpp b/src/hotspot/share/runtime/suspendResumeManager.cpp index 2e75d763cb390..1c770eeec5853 100644 --- a/src/hotspot/share/runtime/suspendResumeManager.cpp +++ b/src/hotspot/share/runtime/suspendResumeManager.cpp @@ -26,7 +26,7 @@ #include "logging/logStream.hpp" #include "memory/resourceArea.hpp" #include "prims/jvmtiThreadState.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/handshake.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -78,7 +78,7 @@ void SuspendResumeManager::set_suspended(bool is_suspend, bool register_vthread_ } } #endif - Atomic::store(&_suspended, is_suspend); + AtomicAccess::store(&_suspended, is_suspend); } bool SuspendResumeManager::suspend(bool register_vthread_SR) { diff --git a/src/hotspot/share/runtime/suspendResumeManager.hpp b/src/hotspot/share/runtime/suspendResumeManager.hpp index 01735cb3bf8e9..19b4ed02ef185 100644 --- a/src/hotspot/share/runtime/suspendResumeManager.hpp +++ b/src/hotspot/share/runtime/suspendResumeManager.hpp @@ -60,7 +60,7 @@ class SuspendResumeManager { void set_suspended(bool to, bool register_vthread_SR); bool is_suspended() { - return Atomic::load(&_suspended); + return AtomicAccess::load(&_suspended); } bool has_async_suspend_handshake() { return _async_suspend_handshake; } diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index ec4e91d8c6fff..ff4e09e741fe1 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -33,7 +33,7 @@ #include "memory/universe.hpp" #include "oops/markWord.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/basicLock.inline.hpp" #include 
"runtime/frame.inline.hpp" #include "runtime/globals.hpp" @@ -70,26 +70,26 @@ class ObjectMonitorDeflationLogging; void MonitorList::add(ObjectMonitor* m) { ObjectMonitor* head; do { - head = Atomic::load(&_head); + head = AtomicAccess::load(&_head); m->set_next_om(head); - } while (Atomic::cmpxchg(&_head, head, m) != head); + } while (AtomicAccess::cmpxchg(&_head, head, m) != head); - size_t count = Atomic::add(&_count, 1u, memory_order_relaxed); + size_t count = AtomicAccess::add(&_count, 1u, memory_order_relaxed); size_t old_max; do { - old_max = Atomic::load(&_max); + old_max = AtomicAccess::load(&_max); if (count <= old_max) { break; } - } while (Atomic::cmpxchg(&_max, old_max, count, memory_order_relaxed) != old_max); + } while (AtomicAccess::cmpxchg(&_max, old_max, count, memory_order_relaxed) != old_max); } size_t MonitorList::count() const { - return Atomic::load(&_count); + return AtomicAccess::load(&_count); } size_t MonitorList::max() const { - return Atomic::load(&_max); + return AtomicAccess::load(&_max); } class ObjectMonitorDeflationSafepointer : public StackObj { @@ -110,7 +110,7 @@ size_t MonitorList::unlink_deflated(size_t deflated_count, ObjectMonitorDeflationSafepointer* safepointer) { size_t unlinked_count = 0; ObjectMonitor* prev = nullptr; - ObjectMonitor* m = Atomic::load_acquire(&_head); + ObjectMonitor* m = AtomicAccess::load_acquire(&_head); while (m != nullptr) { if (m->is_being_async_deflated()) { @@ -131,7 +131,7 @@ size_t MonitorList::unlink_deflated(size_t deflated_count, // Reached the max batch, so bail out of the gathering loop. break; } - if (prev == nullptr && Atomic::load(&_head) != m) { + if (prev == nullptr && AtomicAccess::load(&_head) != m) { // Current batch used to be at head, but it is not at head anymore. // Bail out and figure out where we currently are. This avoids long // walks searching for new prev during unlink under heavy list inserts. @@ -143,7 +143,7 @@ size_t MonitorList::unlink_deflated(size_t deflated_count, if (prev == nullptr) { // The current batch is the first batch, so there is a chance that it starts at head. // Optimistically assume no inserts happened, and try to unlink the entire batch from the head. - ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, m, next); + ObjectMonitor* prev_head = AtomicAccess::cmpxchg(&_head, m, next); if (prev_head != m) { // Something must have updated the head. Figure out the actual prev for this batch. for (ObjectMonitor* n = prev_head; n != m; n = n->next_om()) { @@ -155,7 +155,7 @@ size_t MonitorList::unlink_deflated(size_t deflated_count, } else { // The current batch is preceded by another batch. This guarantees the current batch // does not start at head. Unlink the entire current batch without updating the head. - assert(Atomic::load(&_head) != m, "Sanity"); + assert(AtomicAccess::load(&_head) != m, "Sanity"); prev->set_next_om(next); } @@ -180,7 +180,7 @@ size_t MonitorList::unlink_deflated(size_t deflated_count, // The code that runs after this unlinking does not expect deflated monitors. // Notably, attempting to deflate the already deflated monitor would break. 
{ - ObjectMonitor* m = Atomic::load_acquire(&_head); + ObjectMonitor* m = AtomicAccess::load_acquire(&_head); while (m != nullptr) { assert(!m->is_being_async_deflated(), "All deflated monitors should be unlinked"); m = m->next_om(); @@ -188,12 +188,12 @@ size_t MonitorList::unlink_deflated(size_t deflated_count, } #endif - Atomic::sub(&_count, unlinked_count); + AtomicAccess::sub(&_count, unlinked_count); return unlinked_count; } MonitorList::Iterator MonitorList::iterator() const { - return Iterator(Atomic::load_acquire(&_head)); + return Iterator(AtomicAccess::load_acquire(&_head)); } ObjectMonitor* MonitorList::Iterator::next() { @@ -723,7 +723,7 @@ intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { hash = get_next_hash(current, obj); // get a new hash temp = mark.copy_set_hash(hash) ; // merge the hash into header assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); - uintptr_t v = Atomic::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value()); + uintptr_t v = AtomicAccess::cmpxchg(monitor->metadata_addr(), mark.value(), temp.value()); test = markWord(v); if (test != mark) { // The attempt to update the ObjectMonitor's header/dmw field @@ -928,11 +928,11 @@ size_t ObjectSynchronizer::in_use_list_ceiling() { } void ObjectSynchronizer::dec_in_use_list_ceiling() { - Atomic::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate); + AtomicAccess::sub(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate); } void ObjectSynchronizer::inc_in_use_list_ceiling() { - Atomic::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate); + AtomicAccess::add(&_in_use_list_ceiling, AvgMonitorsPerThreadEstimate); } void ObjectSynchronizer::set_in_use_list_ceiling(size_t new_value) { diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 9c12da151807a..361743f83e874 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -36,7 +36,7 @@ #include "memory/resourceArea.hpp" #include "nmt/memTracker.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/nonJavaThread.hpp" @@ -290,7 +290,7 @@ Thread::~Thread() { ParkEvent::Release(_ParkEvent); // Set to null as a termination indicator for has_terminated(). - Atomic::store(&_ParkEvent, (ParkEvent*)nullptr); + AtomicAccess::store(&_ParkEvent, (ParkEvent*)nullptr); delete handle_area(); delete metadata_handles(); @@ -415,7 +415,7 @@ void Thread::start(Thread* thread) { bool Thread::claim_par_threads_do(uintx claim_token) { uintx token = _threads_do_token; if (token != claim_token) { - uintx res = Atomic::cmpxchg(&_threads_do_token, token, claim_token); + uintx res = AtomicAccess::cmpxchg(&_threads_do_token, token, claim_token); if (res == token) { return true; } @@ -575,7 +575,7 @@ bool Thread::set_as_starting_thread(JavaThread* jt) { // about native mutex_t or HotSpot Mutex:: latency. void Thread::SpinAcquire(volatile int * adr) { - if (Atomic::cmpxchg(adr, 0, 1) == 0) { + if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) { return; // normal fast-path return } @@ -596,7 +596,7 @@ void Thread::SpinAcquire(volatile int * adr) { SpinPause(); } } - if (Atomic::cmpxchg(adr, 0, 1) == 0) return; + if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return; } } @@ -611,5 +611,5 @@ void Thread::SpinRelease(volatile int * adr) { // before the store that releases the lock in memory visibility order. 
// So we need a #loadstore|#storestore "release" memory barrier before // the ST of 0 into the lock-word which releases the lock. - Atomic::release_store(adr, 0); + AtomicAccess::release_store(adr, 0); } diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index 772ef7bbe825f..fe2b997f94c28 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -30,7 +30,7 @@ #include "gc/shared/threadLocalAllocBuffer.hpp" #include "jni.h" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "runtime/safepointMechanism.hpp" @@ -598,7 +598,7 @@ class Thread: public ThreadShadow { // Termination indicator used by the signal handler. // _ParkEvent is just a convenient field we can null out after setting the JavaThread termination state // (which can't itself be read from the signal handler if a signal hits during the Thread destructor). - bool has_terminated() { return Atomic::load(&_ParkEvent) == nullptr; }; + bool has_terminated() { return AtomicAccess::load(&_ParkEvent) == nullptr; }; jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG jint _hashStateX; // thread-specific hashCode generator state diff --git a/src/hotspot/share/runtime/thread.inline.hpp b/src/hotspot/share/runtime/thread.inline.hpp index 98148d485d812..8e80cfc6125cb 100644 --- a/src/hotspot/share/runtime/thread.inline.hpp +++ b/src/hotspot/share/runtime/thread.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,14 +29,14 @@ #include "runtime/thread.hpp" #include "gc/shared/tlab_globals.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #if defined(__APPLE__) && defined(AARCH64) #include "runtime/os.hpp" #endif inline jlong Thread::cooked_allocated_bytes() { - jlong allocated_bytes = Atomic::load_acquire(&_allocated_bytes); + jlong allocated_bytes = AtomicAccess::load_acquire(&_allocated_bytes); if (UseTLAB) { // These reads are unsynchronized and unordered with the thread updating its tlab pointers. // Use only if top > start && used_bytes <= max_tlab_size_bytes. 
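The Thread::SpinAcquire()/SpinRelease() hunks above follow the familiar test-and-test-and-set shape: try to CAS the lock word from 0 to 1, and on contention spin on plain reads (with a pause hint and, in HotSpot, a yield/park backoff) until the word looks free before retrying the CAS; the release side is a single store with release semantics so critical-section writes cannot sink below it. A standalone sketch of the same shape, with std::atomic standing in for AtomicAccess and a simplified backoff in place of SpinPause():

#include <atomic>
#include <thread>

static std::atomic<int> lock_word{0};   // 0 = free, 1 = held (illustrative)

void spin_acquire() {
  for (;;) {
    int expected = 0;
    // CAS 0 -> 1; acquire so the critical section cannot float above the lock.
    if (lock_word.compare_exchange_weak(expected, 1, std::memory_order_acquire)) {
      return;
    }
    // Test-and-test-and-set: spin on cheap relaxed loads while the lock is
    // held, yielding as a stand-in for SpinPause()/park backoff.
    while (lock_word.load(std::memory_order_relaxed) != 0) {
      std::this_thread::yield();
    }
  }
}

void spin_release() {
  // Release store: everything written inside the critical section is visible
  // before another thread can observe the lock word as 0.
  lock_word.store(0, std::memory_order_release);
}

int main() {
  spin_acquire();
  spin_release();
  return 0;
}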
@@ -59,15 +59,15 @@ inline jlong Thread::cooked_allocated_bytes() { } inline ThreadsList* Thread::cmpxchg_threads_hazard_ptr(ThreadsList* exchange_value, ThreadsList* compare_value) { - return (ThreadsList*)Atomic::cmpxchg(&_threads_hazard_ptr, compare_value, exchange_value); + return (ThreadsList*)AtomicAccess::cmpxchg(&_threads_hazard_ptr, compare_value, exchange_value); } inline ThreadsList* Thread::get_threads_hazard_ptr() const { - return (ThreadsList*)Atomic::load_acquire(&_threads_hazard_ptr); + return (ThreadsList*)AtomicAccess::load_acquire(&_threads_hazard_ptr); } inline void Thread::set_threads_hazard_ptr(ThreadsList* new_list) { - Atomic::release_store_fence(&_threads_hazard_ptr, new_list); + AtomicAccess::release_store_fence(&_threads_hazard_ptr, new_list); } #if defined(__APPLE__) && defined(AARCH64) diff --git a/src/hotspot/share/runtime/threadHeapSampler.cpp b/src/hotspot/share/runtime/threadHeapSampler.cpp index d6271a690d63e..0fab41ed2fd29 100644 --- a/src/hotspot/share/runtime/threadHeapSampler.cpp +++ b/src/hotspot/share/runtime/threadHeapSampler.cpp @@ -26,7 +26,7 @@ #include "logging/log.hpp" #include "logging/logTag.hpp" #include "prims/jvmtiExport.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/threadHeapSampler.hpp" @@ -440,9 +440,9 @@ void ThreadHeapSampler::sample(oop obj, HeapWord* tlab_top) { } int ThreadHeapSampler::get_sampling_interval() { - return Atomic::load_acquire(&_sampling_interval); + return AtomicAccess::load_acquire(&_sampling_interval); } void ThreadHeapSampler::set_sampling_interval(int sampling_interval) { - Atomic::release_store(&_sampling_interval, sampling_interval); + AtomicAccess::release_store(&_sampling_interval, sampling_interval); } diff --git a/src/hotspot/share/runtime/threadIdentifier.cpp b/src/hotspot/share/runtime/threadIdentifier.cpp index 117cbfcabdc8c..0a3a90d5e19d5 100644 --- a/src/hotspot/share/runtime/threadIdentifier.cpp +++ b/src/hotspot/share/runtime/threadIdentifier.cpp @@ -22,7 +22,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/threadIdentifier.hpp" // starting at 3, excluding reserved values defined in ObjectMonitor.hpp @@ -38,14 +38,14 @@ int64_t ThreadIdentifier::unsafe_offset() { } int64_t ThreadIdentifier::current() { - return Atomic::load(&next_thread_id); + return AtomicAccess::load(&next_thread_id); } int64_t ThreadIdentifier::next() { int64_t next_tid; do { - next_tid = Atomic::load(&next_thread_id); - } while (Atomic::cmpxchg(&next_thread_id, next_tid, next_tid + 1) != next_tid); + next_tid = AtomicAccess::load(&next_thread_id); + } while (AtomicAccess::cmpxchg(&next_thread_id, next_tid, next_tid + 1) != next_tid); return next_tid; } diff --git a/src/hotspot/share/runtime/threadSMR.cpp b/src/hotspot/share/runtime/threadSMR.cpp index 6247b351573c3..418f7707118ea 100644 --- a/src/hotspot/share/runtime/threadSMR.cpp +++ b/src/hotspot/share/runtime/threadSMR.cpp @@ -25,7 +25,7 @@ #include "classfile/javaClasses.inline.hpp" #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/orderAccess.hpp" @@ -68,7 +68,7 @@ volatile uint ThreadsSMRSupport::_deleted_thread_cnt = 0; // Max time in millis to delete a thread. 
// Impl note: 16-bit might be too small on an overloaded machine. Use -// unsigned since this is a time value. Set via Atomic::cmpxchg() in a +// unsigned since this is a time value. Set via AtomicAccess::cmpxchg() in a // loop for correctness. volatile uint ThreadsSMRSupport::_deleted_thread_time_max = 0; @@ -116,7 +116,7 @@ volatile uint ThreadsSMRSupport::_tlh_cnt = 0; // Max time in millis to delete a ThreadsListHandle. // Impl note: 16-bit might be too small on an overloaded machine. Use -// unsigned since this is a time value. Set via Atomic::cmpxchg() in a +// unsigned since this is a time value. Set via AtomicAccess::cmpxchg() in a // loop for correctness. volatile uint ThreadsSMRSupport::_tlh_time_max = 0; @@ -140,11 +140,11 @@ uint ThreadsSMRSupport::_to_delete_list_max = 0; // 'inline' functions first so the definitions are before first use: inline void ThreadsSMRSupport::add_deleted_thread_times(uint add_value) { - Atomic::add(&_deleted_thread_times, add_value); + AtomicAccess::add(&_deleted_thread_times, add_value); } inline void ThreadsSMRSupport::inc_deleted_thread_cnt() { - Atomic::inc(&_deleted_thread_cnt); + AtomicAccess::inc(&_deleted_thread_cnt); } inline void ThreadsSMRSupport::inc_java_thread_list_alloc_cnt() { @@ -162,7 +162,7 @@ inline void ThreadsSMRSupport::update_deleted_thread_time_max(uint new_value) { // No need to update max value so we're done. break; } - if (Atomic::cmpxchg(&_deleted_thread_time_max, cur_value, new_value) == cur_value) { + if (AtomicAccess::cmpxchg(&_deleted_thread_time_max, cur_value, new_value) == cur_value) { // Updated max value so we're done. Otherwise try it all again. break; } @@ -176,7 +176,7 @@ inline void ThreadsSMRSupport::update_java_thread_list_max(uint new_value) { } inline ThreadsList* ThreadsSMRSupport::xchg_java_thread_list(ThreadsList* new_list) { - return (ThreadsList*)Atomic::xchg(&_java_thread_list, new_list); + return (ThreadsList*)AtomicAccess::xchg(&_java_thread_list, new_list); } // Hash table of pointers found by a scan. Used for collecting hazard @@ -690,7 +690,7 @@ ThreadsList *ThreadsList::add_thread(ThreadsList *list, JavaThread *java_thread) } void ThreadsList::dec_nested_handle_cnt() { - Atomic::dec(&_nested_handle_cnt); + AtomicAccess::dec(&_nested_handle_cnt); } int ThreadsList::find_index_of_JavaThread(JavaThread *target) { @@ -733,7 +733,7 @@ JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const { } void ThreadsList::inc_nested_handle_cnt() { - Atomic::inc(&_nested_handle_cnt); + AtomicAccess::inc(&_nested_handle_cnt); } bool ThreadsList::includes(const JavaThread * const p) const { @@ -895,13 +895,13 @@ void ThreadsSMRSupport::add_thread(JavaThread *thread){ // when the delete_lock is dropped. // void ThreadsSMRSupport::clear_delete_notify() { - Atomic::dec(&_delete_notify); + AtomicAccess::dec(&_delete_notify); } bool ThreadsSMRSupport::delete_notify() { // Use load_acquire() in order to see any updates to _delete_notify // earlier than when delete_lock is grabbed. - return (Atomic::load_acquire(&_delete_notify) != 0); + return (AtomicAccess::load_acquire(&_delete_notify) != 0); } // Safely free a ThreadsList after a Threads::add() or Threads::remove(). @@ -1054,7 +1054,7 @@ void ThreadsSMRSupport::remove_thread(JavaThread *thread) { // See note for clear_delete_notify(). 
// void ThreadsSMRSupport::set_delete_notify() { - Atomic::inc(&_delete_notify); + AtomicAccess::inc(&_delete_notify); } // Safely delete a JavaThread when it is no longer in use by a diff --git a/src/hotspot/share/runtime/threadSMR.inline.hpp b/src/hotspot/share/runtime/threadSMR.inline.hpp index a78fbf4876144..05d29ff2b902e 100644 --- a/src/hotspot/share/runtime/threadSMR.inline.hpp +++ b/src/hotspot/share/runtime/threadSMR.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc/shared/gc_globals.hpp" #include "gc/shared/tlab_globals.hpp" #include "memory/iterator.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/prefetch.inline.hpp" #include "utilities/debug.hpp" @@ -114,11 +114,11 @@ ThreadsListHandle::Iterator ThreadsListHandle::end() { return list()->end(); } // they are called by public inline update_tlh_stats() below: inline void ThreadsSMRSupport::add_tlh_times(uint add_value) { - Atomic::add(&_tlh_times, add_value); + AtomicAccess::add(&_tlh_times, add_value); } inline void ThreadsSMRSupport::inc_tlh_cnt() { - Atomic::inc(&_tlh_cnt); + AtomicAccess::inc(&_tlh_cnt); } inline void ThreadsSMRSupport::update_tlh_time_max(uint new_value) { @@ -128,7 +128,7 @@ inline void ThreadsSMRSupport::update_tlh_time_max(uint new_value) { // No need to update max value so we're done. break; } - if (Atomic::cmpxchg(&_tlh_time_max, cur_value, new_value) == cur_value) { + if (AtomicAccess::cmpxchg(&_tlh_time_max, cur_value, new_value) == cur_value) { // Updated max value so we're done. Otherwise try it all again. break; } @@ -136,7 +136,7 @@ inline void ThreadsSMRSupport::update_tlh_time_max(uint new_value) { } inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() { - return (ThreadsList*)Atomic::load_acquire(&_java_thread_list); + return (ThreadsList*)AtomicAccess::load_acquire(&_java_thread_list); } inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) { diff --git a/src/hotspot/share/runtime/vmThread.cpp b/src/hotspot/share/runtime/vmThread.cpp index c93469c1362b0..260b0f6f0433d 100644 --- a/src/hotspot/share/runtime/vmThread.cpp +++ b/src/hotspot/share/runtime/vmThread.cpp @@ -34,7 +34,7 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/verifyOopClosure.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/cpuTimeCounters.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" @@ -68,17 +68,17 @@ void VMOperationTimeoutTask::task() { } bool VMOperationTimeoutTask::is_armed() { - return Atomic::load_acquire(&_armed) != 0; + return AtomicAccess::load_acquire(&_armed) != 0; } void VMOperationTimeoutTask::arm(const char* vm_op_name) { _vm_op_name = vm_op_name; _arm_time = os::javaTimeNanos(); - Atomic::release_store_fence(&_armed, 1); + AtomicAccess::release_store_fence(&_armed, 1); } void VMOperationTimeoutTask::disarm() { - Atomic::release_store_fence(&_armed, 0); + AtomicAccess::release_store_fence(&_armed, 0); // The two stores to `_armed` are counted in VM-op, but they should be // insignificant compared to the actual VM-op duration. 
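The VMOperationTimeoutTask arm()/is_armed()/disarm() hunk above is a flag-publication handshake: the writer fills in the operation name and arm time first and only then sets _armed with a releasing store (release_store_fence in HotSpot, i.e. a release store followed by a full fence), while readers load the flag with acquire before trusting the published fields. A minimal sketch of that acquire/release pairing, with std::atomic in place of AtomicAccess and simplified field types:

#include <atomic>
#include <cstdint>

static const char*      vm_op_name = nullptr;  // plain fields, published via the flag
static int64_t          arm_time   = 0;
static std::atomic<int> armed{0};

void arm(const char* name, int64_t now) {
  vm_op_name = name;
  arm_time   = now;
  // Release: the two plain stores above become visible before the flag flips.
  // (HotSpot additionally issues a trailing fence via release_store_fence.)
  armed.store(1, std::memory_order_release);
}

bool is_armed() {
  // Acquire: a reader that sees armed != 0 also sees the name and time.
  return armed.load(std::memory_order_acquire) != 0;
}

void disarm() {
  armed.store(0, std::memory_order_release);
}

int main() {
  arm("example op", 12345);
  if (is_armed()) disarm();
  return 0;
}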
@@ -157,7 +157,7 @@ void VMThread::run() { // Notify_lock wait checks on is_running() to rewait in // case of spurious wakeup, it should wait on the last // value set prior to the notify - Atomic::store(&_is_running, true); + AtomicAccess::store(&_is_running, true); { MutexLocker ml(Notify_lock); diff --git a/src/hotspot/share/runtime/vmThread.hpp b/src/hotspot/share/runtime/vmThread.hpp index d2033f66ea543..26d111f27411b 100644 --- a/src/hotspot/share/runtime/vmThread.hpp +++ b/src/hotspot/share/runtime/vmThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_RUNTIME_VMTHREAD_HPP #define SHARE_RUNTIME_VMTHREAD_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/nonJavaThread.hpp" #include "runtime/perfDataTypes.hpp" @@ -88,7 +88,7 @@ class VMThread: public NamedThread { void loop(); public: - bool is_running() const { return Atomic::load(&_is_running); } + bool is_running() const { return AtomicAccess::load(&_is_running); } // Tester bool is_VM_thread() const { return true; } diff --git a/src/hotspot/share/services/attachListener.hpp b/src/hotspot/share/services/attachListener.hpp index 31be87184c14f..2a5cb8a7cc4f2 100644 --- a/src/hotspot/share/services/attachListener.hpp +++ b/src/hotspot/share/services/attachListener.hpp @@ -26,7 +26,7 @@ #define SHARE_SERVICES_ATTACHLISTENER_HPP #include "memory/allStatic.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "runtime/javaThread.inline.hpp" #include "utilities/debug.hpp" @@ -123,24 +123,24 @@ class AttachListener: AllStatic { } static void set_state(AttachListenerState new_state) { - Atomic::store(&_state, new_state); + AtomicAccess::store(&_state, new_state); } static AttachListenerState get_state() { - return Atomic::load(&_state); + return AtomicAccess::load(&_state); } static AttachListenerState transit_state(AttachListenerState new_state, AttachListenerState cmp_state) { - return Atomic::cmpxchg(&_state, cmp_state, new_state); + return AtomicAccess::cmpxchg(&_state, cmp_state, new_state); } static bool is_initialized() { - return Atomic::load(&_state) == AL_INITIALIZED; + return AtomicAccess::load(&_state) == AL_INITIALIZED; } static void set_initialized() { - Atomic::store(&_state, AL_INITIALIZED); + AtomicAccess::store(&_state, AL_INITIALIZED); } // indicates if this VM supports attach-on-demand diff --git a/src/hotspot/share/services/cpuTimeUsage.cpp b/src/hotspot/share/services/cpuTimeUsage.cpp index aa658a67bad8a..d6b01bcbf9ae7 100644 --- a/src/hotspot/share/services/cpuTimeUsage.cpp +++ b/src/hotspot/share/services/cpuTimeUsage.cpp @@ -76,9 +76,9 @@ jlong CPUTimeUsage::GC::stringdedup() { } bool CPUTimeUsage::Error::has_error() { - return Atomic::load(&_has_error); + return AtomicAccess::load(&_has_error); } void CPUTimeUsage::Error::mark_error() { - Atomic::store(&_has_error, true); + AtomicAccess::store(&_has_error, true); } diff --git a/src/hotspot/share/services/finalizerService.cpp b/src/hotspot/share/services/finalizerService.cpp index 969d02ad37d90..9acf17b8cfd38 100644 --- a/src/hotspot/share/services/finalizerService.cpp +++ b/src/hotspot/share/services/finalizerService.cpp @@ -30,7 
+30,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/instanceKlass.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/fieldDescriptor.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.hpp" @@ -105,20 +105,20 @@ const char* FinalizerEntry::codesource() const { } uintptr_t FinalizerEntry::objects_on_heap() const { - return Atomic::load(&_objects_on_heap); + return AtomicAccess::load(&_objects_on_heap); } uintptr_t FinalizerEntry::total_finalizers_run() const { - return Atomic::load(&_total_finalizers_run); + return AtomicAccess::load(&_total_finalizers_run); } void FinalizerEntry::on_register() { - Atomic::inc(&_objects_on_heap, memory_order_relaxed); + AtomicAccess::inc(&_objects_on_heap, memory_order_relaxed); } void FinalizerEntry::on_complete() { - Atomic::inc(&_total_finalizers_run, memory_order_relaxed); - Atomic::dec(&_objects_on_heap, memory_order_relaxed); + AtomicAccess::inc(&_total_finalizers_run, memory_order_relaxed); + AtomicAccess::dec(&_objects_on_heap, memory_order_relaxed); } static inline uintx hash_function(const InstanceKlass* ik) { @@ -193,11 +193,11 @@ class FinalizerEntryLookupGet { }; static inline void set_has_work(bool value) { - Atomic::store(&_has_work, value); + AtomicAccess::store(&_has_work, value); } static inline bool has_work() { - return Atomic::load(&_has_work); + return AtomicAccess::load(&_has_work); } static void request_resize() { diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp index cf62972ca1609..626434b08a70f 100644 --- a/src/hotspot/share/services/heapDumper.cpp +++ b/src/hotspot/share/services/heapDumper.cpp @@ -1720,8 +1720,8 @@ class ThreadDumper : public CHeapObj { void init_serial_nums(volatile int* thread_counter, volatile int* frame_counter) { assert(_start_frame_serial_num == 0, "already initialized"); - _thread_serial_num = Atomic::fetch_then_add(thread_counter, 1); - _start_frame_serial_num = Atomic::fetch_then_add(frame_counter, frame_count()); + _thread_serial_num = AtomicAccess::fetch_then_add(thread_counter, 1); + _start_frame_serial_num = AtomicAccess::fetch_then_add(frame_counter, frame_count()); } bool oom_thread() const { @@ -2252,7 +2252,7 @@ class VM_HeapDumper : public VM_GC_Operation, public WorkerTask, public Unmounte static bool is_vm_dumper(int dumper_id) { return dumper_id == VMDumperId; } // the 1st dumper calling get_next_dumper_id becomes VM dumper int get_next_dumper_id() { - return Atomic::fetch_then_add(&_dump_seq, 1); + return AtomicAccess::fetch_then_add(&_dump_seq, 1); } DumpWriter* writer() const { return _writer; } diff --git a/src/hotspot/share/services/lowMemoryDetector.hpp b/src/hotspot/share/services/lowMemoryDetector.hpp index 73491ea8412de..bdf79f6a2e487 100644 --- a/src/hotspot/share/services/lowMemoryDetector.hpp +++ b/src/hotspot/share/services/lowMemoryDetector.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "memory/allStatic.hpp" #include "oops/oopHandle.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "services/memoryPool.hpp" #include "services/memoryService.hpp" #include "services/memoryUsage.hpp" diff --git a/src/hotspot/share/services/memoryManager.cpp b/src/hotspot/share/services/memoryManager.cpp index 36ea234c1f30b..ef9babbb20dd8 100644 --- a/src/hotspot/share/services/memoryManager.cpp +++ b/src/hotspot/share/services/memoryManager.cpp @@ -28,7 +28,7 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/oopHandle.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" @@ -60,7 +60,7 @@ int MemoryManager::add_pool(MemoryPool* pool) { } bool MemoryManager::is_manager(instanceHandle mh) const { - if (Atomic::load_acquire(&_memory_mgr_obj_initialized)) { + if (AtomicAccess::load_acquire(&_memory_mgr_obj_initialized)) { return mh() == _memory_mgr_obj.resolve(); } else { return false; @@ -79,7 +79,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) { // Lazily create the manager object. // Must do an acquire so as to force ordering of subsequent // loads from anything _memory_mgr_obj points to or implies. - if (!Atomic::load_acquire(&_memory_mgr_obj_initialized)) { + if (!AtomicAccess::load_acquire(&_memory_mgr_obj_initialized)) { // It's ok for more than one thread to execute the code up to the locked region. // Extra manager instances will just be gc'ed. Klass* k = Management::sun_management_ManagementFactoryHelper_klass(CHECK_NULL); @@ -136,7 +136,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) { // Get lock since another thread may have created and installed the instance. MutexLocker ml(THREAD, Management_lock); - if (Atomic::load(&_memory_mgr_obj_initialized)) { + if (AtomicAccess::load(&_memory_mgr_obj_initialized)) { // Some other thread won the race. Release the handle we allocated and // use the other one. Relaxed load is sufficient because flag update is // under the lock. @@ -147,7 +147,7 @@ instanceOop MemoryManager::get_memory_manager_instance(TRAPS) { _memory_mgr_obj = mgr_handle; // Record manager has been created. Release matching unlocked acquire, // to safely publish the manager object. - Atomic::release_store(&_memory_mgr_obj_initialized, true); + AtomicAccess::release_store(&_memory_mgr_obj_initialized, true); } } diff --git a/src/hotspot/share/services/memoryPool.cpp b/src/hotspot/share/services/memoryPool.cpp index d5666eb57c108..41362181c6b67 100644 --- a/src/hotspot/share/services/memoryPool.cpp +++ b/src/hotspot/share/services/memoryPool.cpp @@ -29,7 +29,7 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "oops/oopHandle.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" @@ -67,7 +67,7 @@ MemoryPool::MemoryPool(const char* name, {} bool MemoryPool::is_pool(instanceHandle pool) const { - if (Atomic::load_acquire(&_memory_pool_obj_initialized)) { + if (AtomicAccess::load_acquire(&_memory_pool_obj_initialized)) { return pool() == _memory_pool_obj.resolve(); } else { return false; @@ -90,7 +90,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) { // Lazily create the pool object. 
// Must do an acquire so as to force ordering of subsequent // loads from anything _memory_pool_obj points to or implies. - if (!Atomic::load_acquire(&_memory_pool_obj_initialized)) { + if (!AtomicAccess::load_acquire(&_memory_pool_obj_initialized)) { // It's ok for more than one thread to execute the code up to the locked region. // Extra pool instances will just be gc'ed. InstanceKlass* ik = Management::sun_management_ManagementFactoryHelper_klass(CHECK_NULL); @@ -131,7 +131,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) { // Get lock since another thread may have created and installed the instance. MutexLocker ml(THREAD, Management_lock); - if (Atomic::load(&_memory_pool_obj_initialized)) { + if (AtomicAccess::load(&_memory_pool_obj_initialized)) { // Some other thread won the race. Release the handle we allocated and // use the other one. Relaxed load is sufficient because flag update is // under the lock. @@ -142,7 +142,7 @@ instanceOop MemoryPool::get_memory_pool_instance(TRAPS) { _memory_pool_obj = pool_handle; // Record pool has been created. Release matching unlocked acquire, to // safely publish the pool object. - Atomic::release_store(&_memory_pool_obj_initialized, true); + AtomicAccess::release_store(&_memory_pool_obj_initialized, true); } } diff --git a/src/hotspot/share/services/threadIdTable.cpp b/src/hotspot/share/services/threadIdTable.cpp index e7fd979114834..24ea28abaf6d9 100644 --- a/src/hotspot/share/services/threadIdTable.cpp +++ b/src/hotspot/share/services/threadIdTable.cpp @@ -24,7 +24,7 @@ */ #include "classfile/javaClasses.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaThread.inline.hpp" #include "runtime/threadSMR.hpp" @@ -120,12 +120,12 @@ void ThreadIdTable::create_table(size_t size) { } void ThreadIdTable::item_added() { - Atomic::inc(&_items_count); + AtomicAccess::inc(&_items_count); log_trace(thread, table) ("Thread entry added"); } void ThreadIdTable::item_removed() { - Atomic::dec(&_items_count); + AtomicAccess::dec(&_items_count); log_trace(thread, table) ("Thread entry removed"); } diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp index ad5c19d41c07c..04d39bf9cf2b8 100644 --- a/src/hotspot/share/services/threadService.cpp +++ b/src/hotspot/share/services/threadService.cpp @@ -41,7 +41,7 @@ #include "oops/oopHandle.inline.hpp" #include "prims/jvmtiRawMonitor.hpp" #include "prims/jvmtiThreadState.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/javaCalls.hpp" @@ -140,7 +140,7 @@ void ThreadService::add_thread(JavaThread* thread, bool daemon) { _total_threads_count->inc(); _live_threads_count->inc(); - Atomic::inc(&_atomic_threads_count); + AtomicAccess::inc(&_atomic_threads_count); int count = _atomic_threads_count; if (count > _peak_threads_count->get_value()) { @@ -149,15 +149,15 @@ void ThreadService::add_thread(JavaThread* thread, bool daemon) { if (daemon) { _daemon_threads_count->inc(); - Atomic::inc(&_atomic_daemon_threads_count); + AtomicAccess::inc(&_atomic_daemon_threads_count); } } void ThreadService::decrement_thread_counts(JavaThread* jt, bool daemon) { - Atomic::dec(&_atomic_threads_count); + AtomicAccess::dec(&_atomic_threads_count); if (daemon) { - Atomic::dec(&_atomic_daemon_threads_count); + AtomicAccess::dec(&_atomic_daemon_threads_count); } } diff --git 
a/src/hotspot/share/services/threadService.hpp b/src/hotspot/share/services/threadService.hpp index b1de4bc87038b..bbd0722c51f92 100644 --- a/src/hotspot/share/services/threadService.hpp +++ b/src/hotspot/share/services/threadService.hpp @@ -106,12 +106,12 @@ class ThreadService : public AllStatic { static int get_live_thread_count() { return _atomic_threads_count; } static int get_daemon_thread_count() { return _atomic_daemon_threads_count; } - static jlong exited_allocated_bytes() { return Atomic::load(&_exited_allocated_bytes); } + static jlong exited_allocated_bytes() { return AtomicAccess::load(&_exited_allocated_bytes); } static void incr_exited_allocated_bytes(jlong size) { // No need for an atomic add because called under the Threads_lock, // but because _exited_allocated_bytes is read concurrently, need // atomic store to avoid readers seeing a partial update. - Atomic::store(&_exited_allocated_bytes, _exited_allocated_bytes + size); + AtomicAccess::store(&_exited_allocated_bytes, _exited_allocated_bytes + size); } // Support for thread dump diff --git a/src/hotspot/share/utilities/accessFlags.cpp b/src/hotspot/share/utilities/accessFlags.cpp index 4423e3619eae9..ab4c7cde70903 100644 --- a/src/hotspot/share/utilities/accessFlags.cpp +++ b/src/hotspot/share/utilities/accessFlags.cpp @@ -23,7 +23,7 @@ */ #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/accessFlags.hpp" #if !defined(PRODUCT) || INCLUDE_JVMTI diff --git a/src/hotspot/share/utilities/bitMap.cpp b/src/hotspot/share/utilities/bitMap.cpp index 14b1dc8d10f36..b364608477678 100644 --- a/src/hotspot/share/utilities/bitMap.cpp +++ b/src/hotspot/share/utilities/bitMap.cpp @@ -24,7 +24,7 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/copy.hpp" #include "utilities/debug.hpp" @@ -243,11 +243,11 @@ void BitMap::par_put_range_within_word(idx_t beg, idx_t end, bool value) { // required by inverted_bit_mask_for_range. Also avoids an unnecessary write. if (beg != end) { volatile bm_word_t* pw = word_addr(beg); - bm_word_t w = Atomic::load(pw); + bm_word_t w = AtomicAccess::load(pw); bm_word_t mr = inverted_bit_mask_for_range(beg, end); bm_word_t nw = value ? (w | ~mr) : (w & mr); while (true) { - bm_word_t res = Atomic::cmpxchg(pw, w, nw); + bm_word_t res = AtomicAccess::cmpxchg(pw, w, nw); if (res == w) break; w = res; nw = value ? (w | ~mr) : (w & mr); diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp index af547eb539a59..5ee462bbe47a1 100644 --- a/src/hotspot/share/utilities/bitMap.hpp +++ b/src/hotspot/share/utilities/bitMap.hpp @@ -26,7 +26,7 @@ #define SHARE_UTILITIES_BITMAP_HPP #include "nmt/memTag.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" // Forward decl; diff --git a/src/hotspot/share/utilities/bitMap.inline.hpp b/src/hotspot/share/utilities/bitMap.inline.hpp index 20345425638d6..833b89c750786 100644 --- a/src/hotspot/share/utilities/bitMap.inline.hpp +++ b/src/hotspot/share/utilities/bitMap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,7 @@ #include "utilities/bitMap.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/align.hpp" #include "utilities/count_trailing_zeros.hpp" #include "utilities/powerOfTwo.hpp" @@ -44,13 +44,13 @@ inline void BitMap::clear_bit(idx_t bit) { inline BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) { if (memory_order == memory_order_relaxed || memory_order == memory_order_release) { - return Atomic::load(addr); + return AtomicAccess::load(addr); } else { assert(memory_order == memory_order_acq_rel || memory_order == memory_order_acquire || memory_order == memory_order_conservative, "unexpected memory ordering"); - return Atomic::load_acquire(addr); + return AtomicAccess::load_acquire(addr); } } @@ -74,7 +74,7 @@ inline bool BitMap::par_set_bit(idx_t bit, atomic_memory_order memory_order) { if (new_val == old_val) { return false; // Someone else beat us to it. } - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order); + const bm_word_t cur_val = AtomicAccess::cmpxchg(addr, old_val, new_val, memory_order); if (cur_val == old_val) { return true; // Success. } @@ -93,7 +93,7 @@ inline bool BitMap::par_clear_bit(idx_t bit, atomic_memory_order memory_order) { if (new_val == old_val) { return false; // Someone else beat us to it. } - const bm_word_t cur_val = Atomic::cmpxchg(addr, old_val, new_val, memory_order); + const bm_word_t cur_val = AtomicAccess::cmpxchg(addr, old_val, new_val, memory_order); if (cur_val == old_val) { return true; // Success. } diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp index 3b6e3fefabffa..c01761bad6a01 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp @@ -28,7 +28,7 @@ #include "utilities/concurrentHashTable.hpp" #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/safepoint.hpp" @@ -63,7 +63,7 @@ inline typename ConcurrentHashTable::Node* ConcurrentHashTable:: Node::next() const { - return Atomic::load_acquire(&_next); + return AtomicAccess::load_acquire(&_next); } // Bucket @@ -72,7 +72,7 @@ inline typename ConcurrentHashTable::Node* ConcurrentHashTable:: Bucket::first_raw() const { - return Atomic::load_acquire(&_first); + return AtomicAccess::load_acquire(&_first); } template @@ -84,7 +84,7 @@ inline void ConcurrentHashTable:: // Due to this assert this methods is not static. assert(is_locked(), "Must be locked."); Node** tmp = (Node**)dst; - Atomic::release_store(tmp, clear_set_state(node, *dst)); + AtomicAccess::release_store(tmp, clear_set_state(node, *dst)); } template @@ -93,7 +93,7 @@ ConcurrentHashTable:: Bucket::first() const { // We strip the states bit before returning the ptr. 
- return clear_state(Atomic::load_acquire(&_first)); + return clear_state(AtomicAccess::load_acquire(&_first)); } template @@ -150,7 +150,7 @@ inline bool ConcurrentHashTable:: if (is_locked()) { return false; } - if (Atomic::cmpxchg(&_first, expect, node) == expect) { + if (AtomicAccess::cmpxchg(&_first, expect, node) == expect) { return true; } return false; @@ -165,7 +165,7 @@ inline bool ConcurrentHashTable:: } // We will expect a clean first pointer. Node* tmp = first(); - if (Atomic::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) { + if (AtomicAccess::cmpxchg(&_first, tmp, set_state(tmp, STATE_LOCK_BIT)) == tmp) { return true; } return false; @@ -178,7 +178,7 @@ inline void ConcurrentHashTable:: assert(is_locked(), "Must be locked."); assert(!have_redirect(), "Unlocking a bucket after it has reached terminal state."); - Atomic::release_store(&_first, clear_state(first())); + AtomicAccess::release_store(&_first, clear_state(first())); } template @@ -186,7 +186,7 @@ inline void ConcurrentHashTable:: Bucket::redirect() { assert(is_locked(), "Must be locked."); - Atomic::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT)); + AtomicAccess::release_store(&_first, set_state(_first, STATE_REDIRECT_BIT)); } // InternalTable @@ -222,8 +222,8 @@ inline ConcurrentHashTable:: _cs_context(GlobalCounter::critical_section_begin(_thread)) { // This version is published now. - if (Atomic::load_acquire(&_cht->_invisible_epoch) != nullptr) { - Atomic::release_store_fence(&_cht->_invisible_epoch, (Thread*)nullptr); + if (AtomicAccess::load_acquire(&_cht->_invisible_epoch) != nullptr) { + AtomicAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)nullptr); } } @@ -294,13 +294,13 @@ inline void ConcurrentHashTable:: assert(_resize_lock_owner == thread, "Re-size lock not held"); OrderAccess::fence(); // Prevent below load from floating up. // If no reader saw this version we can skip write_synchronize. - if (Atomic::load_acquire(&_invisible_epoch) == thread) { + if (AtomicAccess::load_acquire(&_invisible_epoch) == thread) { return; } assert(_invisible_epoch == nullptr, "Two thread doing bulk operations"); // We set this/next version that we are synchronizing for to not published. // A reader will zero this flag if it reads this/next version. - Atomic::release_store(&_invisible_epoch, thread); + AtomicAccess::release_store(&_invisible_epoch, thread); GlobalCounter::write_synchronize(); } @@ -379,7 +379,7 @@ inline typename ConcurrentHashTable::InternalTable* ConcurrentHashTable:: get_table() const { - return Atomic::load_acquire(&_table); + return AtomicAccess::load_acquire(&_table); } template @@ -387,7 +387,7 @@ inline typename ConcurrentHashTable::InternalTable* ConcurrentHashTable:: get_new_table() const { - return Atomic::load_acquire(&_new_table); + return AtomicAccess::load_acquire(&_new_table); } template @@ -397,7 +397,7 @@ ConcurrentHashTable:: { InternalTable* old_table = _table; // Publish the new table. - Atomic::release_store(&_table, _new_table); + AtomicAccess::release_store(&_table, _new_table); // All must see this. GlobalCounter::write_synchronize(); // _new_table not read any more. 
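Aside, for reviewers: the concurrentHashTable.inline.hpp hunks above preserve the acquire/release publication protocol for _table and _new_table, only renaming the primitives. A self-contained sketch of that protocol, with std::atomic standing in for AtomicAccess and a placeholder Table type, for readers checking that the rename keeps the pairing intact:

#include <atomic>

// Placeholder type; in HotSpot this is ConcurrentHashTable's InternalTable.
struct Table {
  int log2_size;
  explicit Table(int n) : log2_size(n) {}
};

static std::atomic<Table*> g_table{nullptr};

// Writer: fully construct the table, then publish the pointer with a
// release store so readers that observe the pointer also observe the
// constructor's writes.
void publish_table(int log2_size) {
  Table* t = new Table(log2_size);
  g_table.store(t, std::memory_order_release);
}

// Reader: the acquire load pairs with the release store above.
Table* get_table() {
  return g_table.load(std::memory_order_acquire);
}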
@@ -797,7 +797,7 @@ inline void ConcurrentHashTable:: // Create and publish a new table InternalTable* table = new InternalTable(log2_size); _size_limit_reached = (log2_size == _log2_size_limit); - Atomic::release_store(&_table, table); + AtomicAccess::release_store(&_table, table); } template diff --git a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp index ce4a7298ca5f0..086a548ede521 100644 --- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp @@ -27,7 +27,7 @@ // No concurrentHashTableTasks.hpp -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/concurrentHashTable.inline.hpp" #include "utilities/globalDefinitions.hpp" @@ -62,8 +62,8 @@ class ConcurrentHashTable::BucketsOperation { } bool claim(size_t* start, size_t* stop) { - if (Atomic::load(&_next) < _limit) { - size_t claimed = Atomic::fetch_then_add(&_next, _size); + if (AtomicAccess::load(&_next) < _limit) { + size_t claimed = AtomicAccess::fetch_then_add(&_next, _size); if (claimed < _limit) { *start = claimed; *stop = MIN2(claimed + _size, _limit); @@ -78,7 +78,7 @@ class ConcurrentHashTable::BucketsOperation { } bool have_more_work() { - return Atomic::load_acquire(&_next) >= _limit; + return AtomicAccess::load_acquire(&_next) >= _limit; } }; diff --git a/src/hotspot/share/utilities/copy.hpp b/src/hotspot/share/utilities/copy.hpp index c9a114588edba..6d8d7b8e15e5a 100644 --- a/src/hotspot/share/utilities/copy.hpp +++ b/src/hotspot/share/utilities/copy.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_UTILITIES_COPY_HPP #include "oops/oopsHierarchy.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/globals.hpp" #include "utilities/align.hpp" #include "utilities/bytes.hpp" @@ -303,18 +303,18 @@ class Copy : AllStatic { HeapWord* to, size_t count) { switch (count) { - case 8: Atomic::store(&to[7], Atomic::load(&from[7])); - case 7: Atomic::store(&to[6], Atomic::load(&from[6])); - case 6: Atomic::store(&to[5], Atomic::load(&from[5])); - case 5: Atomic::store(&to[4], Atomic::load(&from[4])); - case 4: Atomic::store(&to[3], Atomic::load(&from[3])); - case 3: Atomic::store(&to[2], Atomic::load(&from[2])); - case 2: Atomic::store(&to[1], Atomic::load(&from[1])); - case 1: Atomic::store(&to[0], Atomic::load(&from[0])); + case 8: AtomicAccess::store(&to[7], AtomicAccess::load(&from[7])); + case 7: AtomicAccess::store(&to[6], AtomicAccess::load(&from[6])); + case 6: AtomicAccess::store(&to[5], AtomicAccess::load(&from[5])); + case 5: AtomicAccess::store(&to[4], AtomicAccess::load(&from[4])); + case 4: AtomicAccess::store(&to[3], AtomicAccess::load(&from[3])); + case 3: AtomicAccess::store(&to[2], AtomicAccess::load(&from[2])); + case 2: AtomicAccess::store(&to[1], AtomicAccess::load(&from[1])); + case 1: AtomicAccess::store(&to[0], AtomicAccess::load(&from[0])); case 0: break; default: while (count-- > 0) { - Atomic::store(to++, Atomic::load(from++)); + AtomicAccess::store(to++, AtomicAccess::load(from++)); } break; } diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp index bd82336020a0a..462762bb0c60f 100644 --- a/src/hotspot/share/utilities/debug.cpp +++ b/src/hotspot/share/utilities/debug.cpp @@ -39,7 +39,7 @@ #include "nmt/memTracker.hpp" #include "oops/klass.inline.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/flags/flagSetting.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" @@ -266,7 +266,7 @@ void report_java_out_of_memory(const char* message) { // same time. To avoid dumping the heap or executing the data collection // commands multiple times we just do it once when the first threads reports // the error. - if (Atomic::cmpxchg(&out_of_memory_reported, 0, 1) == 0) { + if (AtomicAccess::cmpxchg(&out_of_memory_reported, 0, 1) == 0) { // create heap dump before OnOutOfMemoryError commands are executed if (HeapDumpOnOutOfMemoryError) { tty->print_cr("java.lang.OutOfMemoryError: %s", message); @@ -785,7 +785,7 @@ bool handle_assert_poison_fault(const void* ucVoid) { if (ucVoid != nullptr) { // Save context. 
const intx my_tid = os::current_thread_id(); - if (Atomic::cmpxchg(&g_asserting_thread, (intx)0, my_tid) == 0) { + if (AtomicAccess::cmpxchg(&g_asserting_thread, (intx)0, my_tid) == 0) { os::save_assert_context(ucVoid); } } diff --git a/src/hotspot/share/utilities/events.cpp b/src/hotspot/share/utilities/events.cpp index de86772816aac..6adb5311cb594 100644 --- a/src/hotspot/share/utilities/events.cpp +++ b/src/hotspot/share/utilities/events.cpp @@ -26,7 +26,7 @@ #include "memory/allocation.inline.hpp" #include "oops/instanceKlass.hpp" #include "oops/symbol.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/osThread.hpp" @@ -51,15 +51,15 @@ EventLog::EventLog() { // but use lock free add because there are some events that are created later. EventLog* old_head; do { - old_head = Atomic::load(&Events::_logs); + old_head = AtomicAccess::load(&Events::_logs); _next = old_head; - } while (Atomic::cmpxchg(&Events::_logs, old_head, this) != old_head); + } while (AtomicAccess::cmpxchg(&Events::_logs, old_head, this) != old_head); } // For each registered event logger, print out the current contents of // the buffer. void Events::print_all(outputStream* out, int max) { - EventLog* log = Atomic::load(&Events::_logs); + EventLog* log = AtomicAccess::load(&Events::_logs); while (log != nullptr) { log->print_log_on(out, max); log = log->next(); @@ -68,7 +68,7 @@ void Events::print_all(outputStream* out, int max) { // Print a single event log specified by name. void Events::print_one(outputStream* out, const char* log_name, int max) { - EventLog* log = Atomic::load(&Events::_logs); + EventLog* log = AtomicAccess::load(&Events::_logs); int num_printed = 0; while (log != nullptr) { if (log->matches_name_or_handle(log_name)) { @@ -81,7 +81,7 @@ void Events::print_one(outputStream* out, const char* log_name, int max) { if (num_printed == 0) { out->print_cr("The name \"%s\" did not match any known event log. 
" "Valid event log names are:", log_name); - EventLog* log = Atomic::load(&Events::_logs); + EventLog* log = AtomicAccess::load(&Events::_logs); while (log != nullptr) { log->print_names(out); out->cr(); diff --git a/src/hotspot/share/utilities/exceptions.cpp b/src/hotspot/share/utilities/exceptions.cpp index 2ee2132ebbd72..e7acb4387edaf 100644 --- a/src/hotspot/share/utilities/exceptions.cpp +++ b/src/hotspot/share/utilities/exceptions.cpp @@ -32,7 +32,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" @@ -177,7 +177,7 @@ void Exceptions::_throw(JavaThread* thread, const char* file, int line, Handle h } if (h_exception->is_a(vmClasses::LinkageError_klass())) { - Atomic::inc(&_linkage_errors, memory_order_relaxed); + AtomicAccess::inc(&_linkage_errors, memory_order_relaxed); } assert(h_exception->is_a(vmClasses::Throwable_klass()), "exception is not a subclass of java/lang/Throwable"); @@ -253,7 +253,7 @@ void Exceptions::throw_stack_overflow_exception(JavaThread* THREAD, const char* java_lang_Throwable::fill_in_stack_trace(exception, method); } // Increment counter for hs_err file reporting - Atomic::inc(&Exceptions::_stack_overflow_errors, memory_order_relaxed); + AtomicAccess::inc(&Exceptions::_stack_overflow_errors, memory_order_relaxed); } else { // if prior exception, throw that one instead exception = Handle(THREAD, THREAD->pending_exception()); @@ -495,12 +495,12 @@ volatile int Exceptions::_out_of_memory_error_class_metaspace_errors = 0; void Exceptions::count_out_of_memory_exceptions(Handle exception) { if (Universe::is_out_of_memory_error_metaspace(exception())) { - Atomic::inc(&_out_of_memory_error_metaspace_errors, memory_order_relaxed); + AtomicAccess::inc(&_out_of_memory_error_metaspace_errors, memory_order_relaxed); } else if (Universe::is_out_of_memory_error_class_metaspace(exception())) { - Atomic::inc(&_out_of_memory_error_class_metaspace_errors, memory_order_relaxed); + AtomicAccess::inc(&_out_of_memory_error_class_metaspace_errors, memory_order_relaxed); } else { // everything else reported as java heap OOM - Atomic::inc(&_out_of_memory_error_java_heap_errors, memory_order_relaxed); + AtomicAccess::inc(&_out_of_memory_error_java_heap_errors, memory_order_relaxed); } } diff --git a/src/hotspot/share/utilities/filterQueue.hpp b/src/hotspot/share/utilities/filterQueue.hpp index 0564af9e8354d..141c40f09c846 100644 --- a/src/hotspot/share/utilities/filterQueue.hpp +++ b/src/hotspot/share/utilities/filterQueue.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_UTILITIES_FILTERQUEUE_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" // The FilterQueue is FIFO with the ability to skip over queued items. // The skipping is controlled by using a filter when popping. 
@@ -44,7 +44,7 @@ class FilterQueue { Node* _first; Node* load_first() { - return Atomic::load_acquire(&_first); + return AtomicAccess::load_acquire(&_first); } static bool match_all(E d) { return true; } diff --git a/src/hotspot/share/utilities/filterQueue.inline.hpp b/src/hotspot/share/utilities/filterQueue.inline.hpp index 7bb14ca2b0368..18b40b81c6c7a 100644 --- a/src/hotspot/share/utilities/filterQueue.inline.hpp +++ b/src/hotspot/share/utilities/filterQueue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ void FilterQueue::push(E data) { while (true){ head = load_first(); insnode->_next = head; - if (Atomic::cmpxchg(&_first, head, insnode) == head) { + if (AtomicAccess::cmpxchg(&_first, head, insnode) == head) { break; } yield.wait(); @@ -91,7 +91,7 @@ E FilterQueue::pop(MATCH_FUNC& match_func) { if (match_prev == nullptr) { // Working on first - if (Atomic::cmpxchg(&_first, match, match->_next) == match) { + if (AtomicAccess::cmpxchg(&_first, match, match->_next) == match) { E ret = match->_data; delete match; return ret; diff --git a/src/hotspot/share/utilities/globalCounter.cpp b/src/hotspot/share/utilities/globalCounter.cpp index 7bdd68cbe315b..7019273d937cf 100644 --- a/src/hotspot/share/utilities/globalCounter.cpp +++ b/src/hotspot/share/utilities/globalCounter.cpp @@ -23,7 +23,7 @@ */ #include "memory/iterator.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" #include "runtime/threadSMR.inline.hpp" #include "runtime/vmThread.hpp" @@ -41,7 +41,7 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure { SpinYield yield; // Loops on this thread until it has exited the critical read section. while(true) { - uintx cnt = Atomic::load_acquire(thread->get_rcu_counter()); + uintx cnt = AtomicAccess::load_acquire(thread->get_rcu_counter()); // This checks if the thread's counter is active. And if so is the counter // for a pre-existing reader (belongs to this grace period). A pre-existing // reader will have a lower counter than the global counter version for this @@ -58,8 +58,8 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure { void GlobalCounter::write_synchronize() { assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critcal section"); - // Atomic::add must provide fence since we have storeload dependency. - uintx gbl_cnt = Atomic::add(&_global_counter._counter, COUNTER_INCREMENT); + // AtomicAccess::add must provide fence since we have storeload dependency. + uintx gbl_cnt = AtomicAccess::add(&_global_counter._counter, COUNTER_INCREMENT); // Do all RCU threads. CounterThreadCheck ctc(gbl_cnt); diff --git a/src/hotspot/share/utilities/globalCounter.inline.hpp b/src/hotspot/share/utilities/globalCounter.inline.hpp index e86502f8afe33..9cc746173b8b7 100644 --- a/src/hotspot/share/utilities/globalCounter.inline.hpp +++ b/src/hotspot/share/utilities/globalCounter.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -27,20 +27,20 @@ #include "utilities/globalCounter.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/javaThread.hpp" inline GlobalCounter::CSContext GlobalCounter::critical_section_begin(Thread *thread) { assert(thread == Thread::current(), "must be current thread"); - uintx old_cnt = Atomic::load(thread->get_rcu_counter()); + uintx old_cnt = AtomicAccess::load(thread->get_rcu_counter()); // Retain the old counter value if already active, e.g. nested. // Otherwise, set the counter to the current version + active bit. uintx new_cnt = old_cnt; if ((new_cnt & COUNTER_ACTIVE) == 0) { - new_cnt = Atomic::load(&_global_counter._counter) | COUNTER_ACTIVE; + new_cnt = AtomicAccess::load(&_global_counter._counter) | COUNTER_ACTIVE; } - Atomic::release_store_fence(thread->get_rcu_counter(), new_cnt); + AtomicAccess::release_store_fence(thread->get_rcu_counter(), new_cnt); return static_cast(old_cnt); } @@ -49,8 +49,8 @@ GlobalCounter::critical_section_end(Thread *thread, CSContext context) { assert(thread == Thread::current(), "must be current thread"); assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section"); // Restore the counter value from before the associated begin. - Atomic::release_store(thread->get_rcu_counter(), - static_cast(context)); + AtomicAccess::release_store(thread->get_rcu_counter(), + static_cast(context)); } class GlobalCounter::CriticalSection { diff --git a/src/hotspot/share/utilities/lockFreeStack.hpp b/src/hotspot/share/utilities/lockFreeStack.hpp index af4aaeab29115..43bc58fbc445b 100644 --- a/src/hotspot/share/utilities/lockFreeStack.hpp +++ b/src/hotspot/share/utilities/lockFreeStack.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_UTILITIES_LOCKFREESTACK_HPP #define SHARE_UTILITIES_LOCKFREESTACK_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -65,7 +65,7 @@ class LockFreeStack { do { old = cur; set_next(*last, cur); - cur = Atomic::cmpxchg(&_top, cur, first); + cur = AtomicAccess::cmpxchg(&_top, cur, first); } while (old != cur); } @@ -89,7 +89,7 @@ class LockFreeStack { new_top = next(*result); } // CAS even on empty pop, for consistent membar behavior. - result = Atomic::cmpxchg(&_top, result, new_top); + result = AtomicAccess::cmpxchg(&_top, result, new_top); } while (result != old); if (result != nullptr) { set_next(*result, nullptr); @@ -101,7 +101,7 @@ class LockFreeStack { // list of elements. Acts as a full memory barrier. // postcondition: empty() T* pop_all() { - return Atomic::xchg(&_top, (T*)nullptr); + return AtomicAccess::xchg(&_top, (T*)nullptr); } // Atomically adds value to the top of this stack. Acts as a full @@ -145,7 +145,7 @@ class LockFreeStack { // Return the most recently pushed element, or nullptr if the stack is empty. // The returned element is not removed from the stack. - T* top() const { return Atomic::load(&_top); } + T* top() const { return AtomicAccess::load(&_top); } // Return the number of objects in the stack. 
There must be no concurrent // pops while the length is being determined. @@ -160,7 +160,7 @@ class LockFreeStack { // Return the entry following value in the list used by the // specialized LockFreeStack class. static T* next(const T& value) { - return Atomic::load(next_ptr(const_cast(value))); + return AtomicAccess::load(next_ptr(const_cast(value))); } // Set the entry following value to new_next in the list used by the @@ -168,7 +168,7 @@ class LockFreeStack { // if value is in an instance of this specialization of LockFreeStack, // there must be no concurrent push or pop operations on that stack. static void set_next(T& value, T* new_next) { - Atomic::store(next_ptr(value), new_next); + AtomicAccess::store(next_ptr(value), new_next); } }; diff --git a/src/hotspot/share/utilities/nonblockingQueue.inline.hpp b/src/hotspot/share/utilities/nonblockingQueue.inline.hpp index 174aacdeb7129..d805eedb7a43e 100644 --- a/src/hotspot/share/utilities/nonblockingQueue.inline.hpp +++ b/src/hotspot/share/utilities/nonblockingQueue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,16 +27,16 @@ #include "utilities/nonblockingQueue.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" template T* NonblockingQueue::next(const T& node) { - return Atomic::load(next_ptr(const_cast(node))); + return AtomicAccess::load(next_ptr(const_cast(node))); } template void NonblockingQueue::set_next(T& node, T* new_next) { - Atomic::store(next_ptr(node), new_next); + AtomicAccess::store(next_ptr(node), new_next); } template @@ -60,7 +60,7 @@ T* NonblockingQueue::end_marker() const { template T* NonblockingQueue::first() const { - T* head = Atomic::load(&_head); + T* head = AtomicAccess::load(&_head); return head == nullptr ? end_marker() : head; } @@ -71,7 +71,7 @@ bool NonblockingQueue::is_end(const T* entry) const { template bool NonblockingQueue::empty() const { - return Atomic::load(&_head) == nullptr; + return AtomicAccess::load(&_head) == nullptr; } template @@ -105,14 +105,14 @@ void NonblockingQueue::append(T& first, T& last) { // extend after last. We will try to extend from the previous end of // queue. set_next(last, end_marker()); - T* old_tail = Atomic::xchg(&_tail, &last); + T* old_tail = AtomicAccess::xchg(&_tail, &last); if (old_tail == nullptr) { // If old_tail is null then the queue was empty, and _head must also be // null. The correctness of this assertion depends on try_pop clearing // first _head then _tail when taking the last entry. - assert(Atomic::load(&_head) == nullptr, "invariant"); + assert(AtomicAccess::load(&_head) == nullptr, "invariant"); // Fall through to common update of _head. - } else if (is_end(Atomic::cmpxchg(next_ptr(*old_tail), end_marker(), &first))) { + } else if (is_end(AtomicAccess::cmpxchg(next_ptr(*old_tail), end_marker(), &first))) { // Successfully extended the queue list from old_tail to first. No // other push/append could have competed with us, because we claimed // old_tail for extension. We won any races with try_pop by changing @@ -128,26 +128,26 @@ void NonblockingQueue::append(T& first, T& last) { // A concurrent try_pop has claimed old_tail, so it is no longer in the // list. The queue was logically empty. 
_head is either null or // old_tail, depending on how far try_pop operations have progressed. - DEBUG_ONLY(T* old_head = Atomic::load(&_head);) + DEBUG_ONLY(T* old_head = AtomicAccess::load(&_head);) assert((old_head == nullptr) || (old_head == old_tail), "invariant"); // Fall through to common update of _head. } // The queue was empty, and first should become the new _head. The queue // will appear to be empty to any further try_pops until done. - Atomic::store(&_head, &first); + AtomicAccess::store(&_head, &first); } template bool NonblockingQueue::try_pop(T** node_ptr) { // We only need memory_order_consume. Upgrade it to "load_acquire" // as the memory_order_consume API is not ready for use yet. - T* old_head = Atomic::load_acquire(&_head); + T* old_head = AtomicAccess::load_acquire(&_head); if (old_head == nullptr) { *node_ptr = nullptr; return true; // Queue is empty. } - T* next_node = Atomic::load_acquire(next_ptr(*old_head)); + T* next_node = AtomicAccess::load_acquire(next_ptr(*old_head)); if (!is_end(next_node)) { // [Clause 1] // There are several cases for next_node. @@ -160,7 +160,7 @@ bool NonblockingQueue::try_pop(T** node_ptr) { // _head. The success or failure of that attempt, along with the value // of next_node, are used to partially determine which case we're in and // how to proceed. In particular, advancement will fail for case (3). - if (old_head != Atomic::cmpxchg(&_head, old_head, next_node)) { + if (old_head != AtomicAccess::cmpxchg(&_head, old_head, next_node)) { // [Clause 1a] // The cmpxchg to advance the list failed; a concurrent try_pop won // the race and claimed old_head. This can happen for any of the @@ -188,7 +188,7 @@ bool NonblockingQueue::try_pop(T** node_ptr) { return true; } - } else if (is_end(Atomic::cmpxchg(next_ptr(*old_head), next_node, (T*)nullptr))) { + } else if (is_end(AtomicAccess::cmpxchg(next_ptr(*old_head), next_node, (T*)nullptr))) { // [Clause 2] // Old_head was the last entry and we've claimed it by setting its next // value to null. However, this leaves the queue in disarray. Fix up @@ -203,13 +203,13 @@ bool NonblockingQueue::try_pop(T** node_ptr) { // Attempt to change the queue head from old_head to null. Failure of // the cmpxchg indicates a concurrent operation updated _head first. That // could be either a push/append or a try_pop in [Clause 1b]. - Atomic::cmpxchg(&_head, old_head, (T*)nullptr); + AtomicAccess::cmpxchg(&_head, old_head, (T*)nullptr); // Attempt to change the queue tail from old_head to null. Failure of // the cmpxchg indicates that a concurrent push/append updated _tail first. // That operation will eventually recognize the old tail (our old_head) is // no longer in the list and update _head from the list being appended. - Atomic::cmpxchg(&_tail, old_head, (T*)nullptr); + AtomicAccess::cmpxchg(&_tail, old_head, (T*)nullptr); // The queue has been restored to order, and we can return old_head. *node_ptr = old_head; @@ -237,11 +237,11 @@ T* NonblockingQueue::pop() { template Pair NonblockingQueue::take_all() { - T* tail = Atomic::load(&_tail); + T* tail = AtomicAccess::load(&_tail); if (tail != nullptr) set_next(*tail, nullptr); // Clear end marker. 
- Pair result(Atomic::load(&_head), tail); - Atomic::store(&_head, (T*)nullptr); - Atomic::store(&_tail, (T*)nullptr); + Pair result(AtomicAccess::load(&_head), tail); + AtomicAccess::store(&_head, (T*)nullptr); + AtomicAccess::store(&_tail, (T*)nullptr); return result; } diff --git a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp index 58f0e24578026..5e4c9777468c8 100644 --- a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp +++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp @@ -22,7 +22,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" @@ -43,7 +43,7 @@ SingleWriterSynchronizer::SingleWriterSynchronizer() : // synchronization have exited that critical section. void SingleWriterSynchronizer::synchronize() { // Side-effect in assert balanced by debug-only dec at end. - assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers"); + assert(AtomicAccess::add(&_writers, 1u) == 1u, "multiple writers"); // We don't know anything about the muxing between this invocation // and invocations in other threads. We must start with the latest // _enter polarity, else we could clobber the wrong _exit value on @@ -63,7 +63,7 @@ void SingleWriterSynchronizer::synchronize() { do { old = value; *new_ptr = ++value; - value = Atomic::cmpxchg(&_enter, old, value); + value = AtomicAccess::cmpxchg(&_enter, old, value); } while (old != value); // Critical sections entered before we changed the polarity will use // the old exit counter. Critical sections entered after the change @@ -84,7 +84,7 @@ void SingleWriterSynchronizer::synchronize() { // to complete, e.g. for the value of old_ptr to catch up with old. // Loop because there could be pending wakeups unrelated to this // synchronize request. - while (old != Atomic::load_acquire(old_ptr)) { + while (old != AtomicAccess::load_acquire(old_ptr)) { _wakeup.wait(); } // (5) Drain any pending wakeups. A critical section exit may have @@ -95,5 +95,5 @@ void SingleWriterSynchronizer::synchronize() { // lead to semaphore overflow. This doesn't guarantee no unrelated // wakeups for the next wait, but prevents unbounded accumulation. while (_wakeup.trywait()) {} - DEBUG_ONLY(Atomic::dec(&_writers);) + DEBUG_ONLY(AtomicAccess::dec(&_writers);) } diff --git a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp index b49e75ffe9074..737d5c6d4acce 100644 --- a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp +++ b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #define SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/semaphore.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -87,11 +87,11 @@ class SingleWriterSynchronizer { }; inline uint SingleWriterSynchronizer::enter() { - return Atomic::add(&_enter, 2u); + return AtomicAccess::add(&_enter, 2u); } inline void SingleWriterSynchronizer::exit(uint enter_value) { - uint exit_value = Atomic::add(&_exit[enter_value & 1], 2u); + uint exit_value = AtomicAccess::add(&_exit[enter_value & 1], 2u); // If this exit completes a synchronize request, wakeup possibly // waiting synchronizer. Read of _waiting_for must follow the _exit // update. diff --git a/src/hotspot/share/utilities/tableStatistics.cpp b/src/hotspot/share/utilities/tableStatistics.cpp index b6ce4f4f6e97a..331652becd57c 100644 --- a/src/hotspot/share/utilities/tableStatistics.cpp +++ b/src/hotspot/share/utilities/tableStatistics.cpp @@ -22,7 +22,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" @@ -42,7 +42,7 @@ TableRateStatistics::~TableRateStatistics() { }; void TableRateStatistics::add() { #if INCLUDE_JFR if (Jfr::is_recording()) { - Atomic::inc(&_added_items); + AtomicAccess::inc(&_added_items); } #endif } @@ -50,7 +50,7 @@ void TableRateStatistics::add() { void TableRateStatistics::remove() { #if INCLUDE_JFR if (Jfr::is_recording()) { - Atomic::inc(&_removed_items); + AtomicAccess::inc(&_removed_items); } #endif } diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp index 81e357f1c4f39..357bf111804ec 100644 --- a/src/hotspot/share/utilities/vmError.cpp +++ b/src/hotspot/share/utilities/vmError.cpp @@ -43,7 +43,7 @@ #include "oops/compressedOops.hpp" #include "prims/whitebox.hpp" #include "runtime/arguments.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/flags/jvmFlag.hpp" #include "runtime/frame.inline.hpp" #include "runtime/init.hpp" @@ -559,24 +559,24 @@ jlong VMError::get_current_timestamp() { void VMError::record_reporting_start_time() { const jlong now = get_current_timestamp(); - Atomic::store(&_reporting_start_time, now); + AtomicAccess::store(&_reporting_start_time, now); } jlong VMError::get_reporting_start_time() { - return Atomic::load(&_reporting_start_time); + return AtomicAccess::load(&_reporting_start_time); } void VMError::record_step_start_time() { const jlong now = get_current_timestamp(); - Atomic::store(&_step_start_time, now); + AtomicAccess::store(&_step_start_time, now); } jlong VMError::get_step_start_time() { - return Atomic::load(&_step_start_time); + return AtomicAccess::load(&_step_start_time); } void VMError::clear_step_start_time() { - return Atomic::store(&_step_start_time, (jlong)0); + return AtomicAccess::store(&_step_start_time, (jlong)0); } // This is the main function to report a fatal error. Only one thread can @@ -1341,21 +1341,21 @@ void VMError::report(outputStream* st, bool _verbose) { void VMError::set_handshake_timed_out_thread(Thread* thread) { // Only preserve the first thread to time-out this way. The atomic operation ensures // visibility to the target thread. 
- Atomic::replace_if_null(&_handshake_timed_out_thread, thread); + AtomicAccess::replace_if_null(&_handshake_timed_out_thread, thread); } void VMError::set_safepoint_timed_out_thread(Thread* thread) { // Only preserve the first thread to time-out this way. The atomic operation ensures // visibility to the target thread. - Atomic::replace_if_null(&_safepoint_timed_out_thread, thread); + AtomicAccess::replace_if_null(&_safepoint_timed_out_thread, thread); } Thread* VMError::get_handshake_timed_out_thread() { - return Atomic::load(&_handshake_timed_out_thread); + return AtomicAccess::load(&_handshake_timed_out_thread); } Thread* VMError::get_safepoint_timed_out_thread() { - return Atomic::load(&_safepoint_timed_out_thread); + return AtomicAccess::load(&_safepoint_timed_out_thread); } // Report for the vm_info_cmd. This prints out the information above omitting @@ -1691,7 +1691,7 @@ void VMError::report_and_die(int id, const char* message, const char* detail_fmt intptr_t mytid = os::current_thread_id(); if (_first_error_tid == -1 && - Atomic::cmpxchg(&_first_error_tid, (intptr_t)-1, mytid) == -1) { + AtomicAccess::cmpxchg(&_first_error_tid, (intptr_t)-1, mytid) == -1) { if (SuppressFatalErrorMessage) { os::abort(CreateCoredumpOnCrash); diff --git a/src/hotspot/share/utilities/waitBarrier_generic.cpp b/src/hotspot/share/utilities/waitBarrier_generic.cpp index efaa49b24a9c2..a6436d93ffc4d 100644 --- a/src/hotspot/share/utilities/waitBarrier_generic.cpp +++ b/src/hotspot/share/utilities/waitBarrier_generic.cpp @@ -23,7 +23,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "utilities/spinYield.hpp" @@ -79,10 +79,10 @@ void GenericWaitBarrier::arm(int barrier_tag) { assert(barrier_tag != 0, "Pre arm: Should be arming with armed value"); - assert(Atomic::load(&_barrier_tag) == 0, + assert(AtomicAccess::load(&_barrier_tag) == 0, "Pre arm: Should not be already armed. Tag: %d", - Atomic::load(&_barrier_tag)); - Atomic::release_store(&_barrier_tag, barrier_tag); + AtomicAccess::load(&_barrier_tag)); + AtomicAccess::release_store(&_barrier_tag, barrier_tag); Cell &cell = tag_to_cell(barrier_tag); cell.arm(barrier_tag); @@ -92,9 +92,9 @@ void GenericWaitBarrier::arm(int barrier_tag) { } void GenericWaitBarrier::disarm() { - int barrier_tag = Atomic::load_acquire(&_barrier_tag); + int barrier_tag = AtomicAccess::load_acquire(&_barrier_tag); assert(barrier_tag != 0, "Pre disarm: Should be armed. Tag: %d", barrier_tag); - Atomic::release_store(&_barrier_tag, 0); + AtomicAccess::release_store(&_barrier_tag, 0); Cell &cell = tag_to_cell(barrier_tag); cell.disarm(barrier_tag); @@ -121,7 +121,7 @@ void GenericWaitBarrier::Cell::arm(int32_t requested_tag) { SpinYield sp; while (true) { - state = Atomic::load_acquire(&_state); + state = AtomicAccess::load_acquire(&_state); assert(decode_tag(state) == 0, "Pre arm: Should not be armed. " "Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT, @@ -134,7 +134,7 @@ void GenericWaitBarrier::Cell::arm(int32_t requested_tag) { // Try to swing cell to armed. This should always succeed after the check above. int64_t new_state = encode(requested_tag, 0); - int64_t prev_state = Atomic::cmpxchg(&_state, state, new_state); + int64_t prev_state = AtomicAccess::cmpxchg(&_state, state, new_state); if (prev_state != state) { fatal("Cannot arm the wait barrier. 
" "Tag: " INT32_FORMAT "; Waiters: " INT32_FORMAT, @@ -145,14 +145,14 @@ void GenericWaitBarrier::Cell::arm(int32_t requested_tag) { int GenericWaitBarrier::Cell::signal_if_needed(int max) { int signals = 0; while (true) { - int cur = Atomic::load_acquire(&_outstanding_wakeups); + int cur = AtomicAccess::load_acquire(&_outstanding_wakeups); if (cur == 0) { // All done, no more waiters. return 0; } assert(cur > 0, "Sanity"); - int prev = Atomic::cmpxchg(&_outstanding_wakeups, cur, cur - 1); + int prev = AtomicAccess::cmpxchg(&_outstanding_wakeups, cur, cur - 1); if (prev != cur) { // Contention, return to caller for early return or backoff. return prev; @@ -172,7 +172,7 @@ void GenericWaitBarrier::Cell::disarm(int32_t expected_tag) { int32_t waiters; while (true) { - int64_t state = Atomic::load_acquire(&_state); + int64_t state = AtomicAccess::load_acquire(&_state); int32_t tag = decode_tag(state); waiters = decode_waiters(state); @@ -182,7 +182,7 @@ void GenericWaitBarrier::Cell::disarm(int32_t expected_tag) { tag, waiters); int64_t new_state = encode(0, waiters); - if (Atomic::cmpxchg(&_state, state, new_state) == state) { + if (AtomicAccess::cmpxchg(&_state, state, new_state) == state) { // Successfully disarmed. break; } @@ -191,19 +191,19 @@ void GenericWaitBarrier::Cell::disarm(int32_t expected_tag) { // Wake up waiters, if we have at least one. // Allow other threads to assist with wakeups, if possible. if (waiters > 0) { - Atomic::release_store(&_outstanding_wakeups, waiters); + AtomicAccess::release_store(&_outstanding_wakeups, waiters); SpinYield sp; while (signal_if_needed(INT_MAX) > 0) { sp.wait(); } } - assert(Atomic::load(&_outstanding_wakeups) == 0, "Post disarm: Should not have outstanding wakeups"); + assert(AtomicAccess::load(&_outstanding_wakeups) == 0, "Post disarm: Should not have outstanding wakeups"); } void GenericWaitBarrier::Cell::wait(int32_t expected_tag) { // Try to register ourselves as pending waiter. while (true) { - int64_t state = Atomic::load_acquire(&_state); + int64_t state = AtomicAccess::load_acquire(&_state); int32_t tag = decode_tag(state); if (tag != expected_tag) { // Cell tag had changed while waiting here. This means either the cell had @@ -219,7 +219,7 @@ void GenericWaitBarrier::Cell::wait(int32_t expected_tag) { tag, waiters); int64_t new_state = encode(tag, waiters + 1); - if (Atomic::cmpxchg(&_state, state, new_state) == state) { + if (AtomicAccess::cmpxchg(&_state, state, new_state) == state) { // Success! Proceed to wait. break; } @@ -238,7 +238,7 @@ void GenericWaitBarrier::Cell::wait(int32_t expected_tag) { // Register ourselves as completed waiter before leaving. while (true) { - int64_t state = Atomic::load_acquire(&_state); + int64_t state = AtomicAccess::load_acquire(&_state); int32_t tag = decode_tag(state); int32_t waiters = decode_waiters(state); @@ -248,7 +248,7 @@ void GenericWaitBarrier::Cell::wait(int32_t expected_tag) { tag, waiters); int64_t new_state = encode(tag, waiters - 1); - if (Atomic::cmpxchg(&_state, state, new_state) == state) { + if (AtomicAccess::cmpxchg(&_state, state, new_state) == state) { // Success! 
break; } diff --git a/src/hotspot/share/utilities/zipLibrary.cpp b/src/hotspot/share/utilities/zipLibrary.cpp index ae68fb9ef7703..54875516a0fa8 100644 --- a/src/hotspot/share/utilities/zipLibrary.cpp +++ b/src/hotspot/share/utilities/zipLibrary.cpp @@ -53,7 +53,7 @@ static void* _zip_handle = nullptr; static bool _loaded = false; static inline bool is_loaded() { - return Atomic::load_acquire(&_loaded); + return AtomicAccess::load_acquire(&_loaded); } static inline bool not_loaded() { @@ -111,7 +111,7 @@ static void load_zip_library(bool vm_exit_on_failure) { } store_function_pointers(&path[0], vm_exit_on_failure); - Atomic::release_store(&_loaded, true); + AtomicAccess::release_store(&_loaded, true); assert(is_loaded(), "invariant"); } diff --git a/test/hotspot/gtest/cds/test_archiveWorkers.cpp b/test/hotspot/gtest/cds/test_archiveWorkers.cpp index 825c27151d5c6..55c3528f0c2ba 100644 --- a/test/hotspot/gtest/cds/test_archiveWorkers.cpp +++ b/test/hotspot/gtest/cds/test_archiveWorkers.cpp @@ -32,11 +32,11 @@ class TestArchiveWorkerTask : public ArchiveWorkerTask { public: TestArchiveWorkerTask() : ArchiveWorkerTask("Test"), _sum(0), _max(0) {} void work(int chunk, int max_chunks) override { - Atomic::add(&_sum, chunk); - Atomic::store(&_max, max_chunks); + AtomicAccess::add(&_sum, chunk); + AtomicAccess::store(&_max, max_chunks); } - int sum() { return Atomic::load(&_sum); } - int max() { return Atomic::load(&_max); } + int sum() { return AtomicAccess::load(&_sum); } + int max() { return AtomicAccess::load(&_max); } }; // Test a repeated cycle of workers init/shutdown without task works. diff --git a/test/hotspot/gtest/gc/g1/test_g1BatchedGangTask.cpp b/test/hotspot/gtest/gc/g1/test_g1BatchedGangTask.cpp index e2cc627ed9197..902406eaf6525 100644 --- a/test/hotspot/gtest/gc/g1/test_g1BatchedGangTask.cpp +++ b/test/hotspot/gtest/gc/g1/test_g1BatchedGangTask.cpp @@ -24,7 +24,7 @@ #include "gc/g1/g1BatchedTask.hpp" #include "gc/shared/workerThread.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "unittest.hpp" class G1BatchedTaskWorkers : AllStatic { @@ -62,13 +62,13 @@ class G1TestSubTask : public G1AbstractSubTask { uint _max_workers; void do_work_called(uint worker_id) { - Atomic::inc(&_num_do_work); - bool orig_value = Atomic::cmpxchg(&_do_work_called_by[worker_id], false, true); + AtomicAccess::inc(&_num_do_work); + bool orig_value = AtomicAccess::cmpxchg(&_do_work_called_by[worker_id], false, true); ASSERT_EQ(orig_value, false); } void verify_do_work_called_by(uint num_workers) { - ASSERT_EQ(Atomic::load(&_num_do_work), num_workers); + ASSERT_EQ(AtomicAccess::load(&_num_do_work), num_workers); // Do not need to check the _do_work_called_by array. The count is already verified // by above statement, and we already check that a given flag is only set once. 
} diff --git a/test/hotspot/gtest/gc/g1/test_g1CardSet.cpp b/test/hotspot/gtest/gc/g1/test_g1CardSet.cpp index 958561dc15e51..70a70dc069f6f 100644 --- a/test/hotspot/gtest/gc/g1/test_g1CardSet.cpp +++ b/test/hotspot/gtest/gc/g1/test_g1CardSet.cpp @@ -413,8 +413,8 @@ class G1CardSetMtTestTask : public WorkerTask { found++; } } - Atomic::add(&_added, added); - Atomic::add(&_found, found); + AtomicAccess::add(&_added, added); + AtomicAccess::add(&_found, found); } size_t added() const { return _added; } diff --git a/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp b/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp index d7cf3a6361c72..d7e1c73f303e8 100644 --- a/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp +++ b/test/hotspot/gtest/gc/g1/test_g1FreeIdSet.cpp @@ -24,7 +24,7 @@ #include "gc/g1/g1FreeIdSet.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/semaphore.inline.hpp" #include "runtime/thread.hpp" @@ -43,7 +43,7 @@ struct G1FreeIdSet::TestSupport : AllStatic { static uint start(const G1FreeIdSet& set) { return set._start; } static uint size(const G1FreeIdSet& set) { return set._size; } static uintx mask(const G1FreeIdSet& set) { return set._head_index_mask; } - static uintx head(const G1FreeIdSet& set) { return Atomic::load(&set._head); } + static uintx head(const G1FreeIdSet& set) { return AtomicAccess::load(&set._head); } static uint head_index(const G1FreeIdSet& set, uintx head) { return set.head_index(head); @@ -106,14 +106,14 @@ class TestG1FreeIdSetThread : public JavaTestThread { {} virtual void main_run() { - while (Atomic::load_acquire(_continue_running)) { + while (AtomicAccess::load_acquire(_continue_running)) { uint id = _set->claim_par_id(); _set->release_par_id(id); ++_allocations; ThreadBlockInVM tbiv(this); // Safepoint check. } tty->print_cr("%u allocations: %zu", _thread_number, _allocations); - Atomic::add(_total_allocations, _allocations); + AtomicAccess::add(_total_allocations, _allocations); } }; @@ -145,7 +145,7 @@ TEST_VM(G1FreeIdSetTest, stress) { ThreadInVMfromNative invm(this_thread); this_thread->sleep(milliseconds_to_run); } - Atomic::release_store(&continue_running, false); + AtomicAccess::release_store(&continue_running, false); for (uint i = 0; i < nthreads; ++i) { ThreadInVMfromNative invm(this_thread); post.wait_with_safepoint_check(this_thread); diff --git a/test/hotspot/gtest/gc/g1/test_stressCommitUncommit.cpp b/test/hotspot/gtest/gc/g1/test_stressCommitUncommit.cpp index 45da22ce116be..6b3e9fd50bc84 100644 --- a/test/hotspot/gtest/gc/g1/test_stressCommitUncommit.cpp +++ b/test/hotspot/gtest/gc/g1/test_stressCommitUncommit.cpp @@ -26,7 +26,7 @@ #include "gc/g1/g1RegionToSpaceMapper.hpp" #include "gc/shared/workerThread.hpp" #include "memory/memoryReserver.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "unittest.hpp" @@ -59,7 +59,7 @@ class G1TestCommitUncommit : public WorkerTask { _claim_id(0) { } void work(uint worker_id) { - uint index = Atomic::fetch_then_add(&_claim_id, 1u); + uint index = AtomicAccess::fetch_then_add(&_claim_id, 1u); for (int i = 0; i < 100000; i++) { // Stress commit and uncommit of a single region. 
The same diff --git a/test/hotspot/gtest/gc/shared/test_bufferNodeAllocator.cpp b/test/hotspot/gtest/gc/shared/test_bufferNodeAllocator.cpp index 7169587e71640..ec72ce404e3de 100644 --- a/test/hotspot/gtest/gc/shared/test_bufferNodeAllocator.cpp +++ b/test/hotspot/gtest/gc/shared/test_bufferNodeAllocator.cpp @@ -24,7 +24,7 @@ #include "gc/shared/bufferNode.hpp" #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/semaphore.inline.hpp" #include "runtime/thread.hpp" @@ -135,14 +135,14 @@ class BufferNode::TestSupport::AllocatorThread : public JavaTestThread { {} virtual void main_run() { - while (Atomic::load_acquire(_continue_running)) { + while (AtomicAccess::load_acquire(_continue_running)) { BufferNode* node = _allocator->allocate(); _cbl->push(node); ++_allocations; ThreadBlockInVM tbiv(this); // Safepoint check. } tty->print_cr("allocations: %zu", _allocations); - Atomic::add(_total_allocations, _allocations); + AtomicAccess::add(_total_allocations, _allocations); } }; @@ -172,7 +172,7 @@ class BufferNode::TestSupport::ProcessorThread : public JavaTestThread { _allocator->release(node); } else if (shutdown_requested) { return; - } else if (!Atomic::load_acquire(_continue_running)) { + } else if (!AtomicAccess::load_acquire(_continue_running)) { // To avoid a race that could leave buffers in the list after this // thread has shut down, continue processing until the list is empty // *after* the shut down request has been received. @@ -222,12 +222,12 @@ static void run_test(BufferNode::Allocator* allocator, CompletedList* cbl) { ThreadInVMfromNative invm(this_thread); this_thread->sleep(milliseconds_to_run); } - Atomic::release_store(&allocator_running, false); + AtomicAccess::release_store(&allocator_running, false); for (uint i = 0; i < num_allocator_threads; ++i) { ThreadInVMfromNative invm(this_thread); post.wait_with_safepoint_check(this_thread); } - Atomic::release_store(&processor_running, false); + AtomicAccess::release_store(&processor_running, false); for (uint i = 0; i < num_processor_threads; ++i) { ThreadInVMfromNative invm(this_thread); post.wait_with_safepoint_check(this_thread); diff --git a/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp b/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp index d0c6dc120a3b1..69548b06e5102 100644 --- a/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp +++ b/test/hotspot/gtest/jfr/test_adaptiveSampler.cpp @@ -39,7 +39,7 @@ #include "jfr/utilities/jfrTimeConverter.hpp" #include "jfr/utilities/jfrTryLock.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "unittest.hpp" diff --git a/test/hotspot/gtest/oops/test_markWord.cpp b/test/hotspot/gtest/oops/test_markWord.cpp index aef2ac14ebc02..226d5a2dd74a0 100644 --- a/test/hotspot/gtest/oops/test_markWord.cpp +++ b/test/hotspot/gtest/oops/test_markWord.cpp @@ -26,7 +26,7 @@ #include "memory/universe.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" diff --git a/test/hotspot/gtest/runtime/test_atomic.cpp b/test/hotspot/gtest/runtime/test_atomicAccess.cpp similarity index 69% rename from test/hotspot/gtest/runtime/test_atomic.cpp rename to test/hotspot/gtest/runtime/test_atomicAccess.cpp index 
e4e718fa33e81..523f27ca87062 100644 --- a/test/hotspot/gtest/runtime/test_atomic.cpp +++ b/test/hotspot/gtest/runtime/test_atomicAccess.cpp @@ -22,7 +22,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "unittest.hpp" // These tests of Atomic only verify functionality. They don't verify atomicity. @@ -36,19 +36,19 @@ struct AtomicAddTestSupport { void test_add() { T zero = 0; T five = 5; - Atomic::store(&_test_value, zero); - T value = Atomic::add(&_test_value, five); + AtomicAccess::store(&_test_value, zero); + T value = AtomicAccess::add(&_test_value, five); EXPECT_EQ(five, value); - EXPECT_EQ(five, Atomic::load(&_test_value)); + EXPECT_EQ(five, AtomicAccess::load(&_test_value)); } void test_fetch_add() { T zero = 0; T five = 5; - Atomic::store(&_test_value, zero); - T value = Atomic::fetch_then_add(&_test_value, five); + AtomicAccess::store(&_test_value, zero); + T value = AtomicAccess::fetch_then_add(&_test_value, five); EXPECT_EQ(zero, value); - EXPECT_EQ(five, Atomic::load(&_test_value)); + EXPECT_EQ(five, AtomicAccess::load(&_test_value)); } }; @@ -72,15 +72,15 @@ TEST_VM(AtomicAddTest, ptr) { uint* five = &_test_values[5]; uint* six = &_test_values[6]; - Atomic::store(&_test_value, zero); - uint* value = Atomic::add(&_test_value, 5); + AtomicAccess::store(&_test_value, zero); + uint* value = AtomicAccess::add(&_test_value, 5); EXPECT_EQ(five, value); - EXPECT_EQ(five, Atomic::load(&_test_value)); + EXPECT_EQ(five, AtomicAccess::load(&_test_value)); - Atomic::store(&_test_value, zero); - value = Atomic::fetch_then_add(&_test_value, 6); + AtomicAccess::store(&_test_value, zero); + value = AtomicAccess::fetch_then_add(&_test_value, 6); EXPECT_EQ(zero, value); - EXPECT_EQ(six, Atomic::load(&_test_value)); + EXPECT_EQ(six, AtomicAccess::load(&_test_value)); }; template @@ -92,10 +92,10 @@ struct AtomicXchgTestSupport { void test() { T zero = 0; T five = 5; - Atomic::store(&_test_value, zero); - T res = Atomic::xchg(&_test_value, five); + AtomicAccess::store(&_test_value, zero); + T res = AtomicAccess::xchg(&_test_value, five); EXPECT_EQ(zero, res); - EXPECT_EQ(five, Atomic::load(&_test_value)); + EXPECT_EQ(five, AtomicAccess::load(&_test_value)); } }; @@ -119,13 +119,13 @@ struct AtomicCmpxchgTestSupport { T zero = 0; T five = 5; T ten = 10; - Atomic::store(&_test_value, zero); - T res = Atomic::cmpxchg(&_test_value, five, ten); + AtomicAccess::store(&_test_value, zero); + T res = AtomicAccess::cmpxchg(&_test_value, five, ten); EXPECT_EQ(zero, res); - EXPECT_EQ(zero, Atomic::load(&_test_value)); - res = Atomic::cmpxchg(&_test_value, zero, ten); + EXPECT_EQ(zero, AtomicAccess::load(&_test_value)); + res = AtomicAccess::cmpxchg(&_test_value, zero, ten); EXPECT_EQ(zero, res); - EXPECT_EQ(ten, Atomic::load(&_test_value)); + EXPECT_EQ(ten, AtomicAccess::load(&_test_value)); } }; @@ -167,10 +167,10 @@ struct AtomicCmpxchg1ByteStressSupport { void test_index(int index) { char one = 1; - Atomic::cmpxchg(&_array[index], _default_val, one); + AtomicAccess::cmpxchg(&_array[index], _default_val, one); validate(_default_val, one, index); - Atomic::cmpxchg(&_array[index], one, _default_val); + AtomicAccess::cmpxchg(&_array[index], one, _default_val); validate(_default_val, _default_val, index); } @@ -194,25 +194,25 @@ struct AtomicEnumTestSupport { AtomicEnumTestSupport() : _test_value{} {} void test_store_load(T value) { - EXPECT_NE(value, Atomic::load(&_test_value)); - Atomic::store(&_test_value, value); - EXPECT_EQ(value, Atomic::load(&_test_value)); + 
EXPECT_NE(value, AtomicAccess::load(&_test_value)); + AtomicAccess::store(&_test_value, value); + EXPECT_EQ(value, AtomicAccess::load(&_test_value)); } void test_cmpxchg(T value1, T value2) { - EXPECT_NE(value1, Atomic::load(&_test_value)); - Atomic::store(&_test_value, value1); - EXPECT_EQ(value1, Atomic::cmpxchg(&_test_value, value2, value2)); - EXPECT_EQ(value1, Atomic::load(&_test_value)); - EXPECT_EQ(value1, Atomic::cmpxchg(&_test_value, value1, value2)); - EXPECT_EQ(value2, Atomic::load(&_test_value)); + EXPECT_NE(value1, AtomicAccess::load(&_test_value)); + AtomicAccess::store(&_test_value, value1); + EXPECT_EQ(value1, AtomicAccess::cmpxchg(&_test_value, value2, value2)); + EXPECT_EQ(value1, AtomicAccess::load(&_test_value)); + EXPECT_EQ(value1, AtomicAccess::cmpxchg(&_test_value, value1, value2)); + EXPECT_EQ(value2, AtomicAccess::load(&_test_value)); } void test_xchg(T value1, T value2) { - EXPECT_NE(value1, Atomic::load(&_test_value)); - Atomic::store(&_test_value, value1); - EXPECT_EQ(value1, Atomic::xchg(&_test_value, value2)); - EXPECT_EQ(value2, Atomic::load(&_test_value)); + EXPECT_NE(value1, AtomicAccess::load(&_test_value)); + AtomicAccess::store(&_test_value, value1); + EXPECT_EQ(value1, AtomicAccess::xchg(&_test_value, value2)); + EXPECT_EQ(value2, AtomicAccess::load(&_test_value)); } }; @@ -252,57 +252,57 @@ struct AtomicBitopsTestSupport { AtomicBitopsTestSupport() : _test_value(0) {} void fetch_then_and() { - Atomic::store(&_test_value, _old_value); + AtomicAccess::store(&_test_value, _old_value); T expected = _old_value & _change_value; EXPECT_NE(_old_value, expected); - T result = Atomic::fetch_then_and(&_test_value, _change_value); + T result = AtomicAccess::fetch_then_and(&_test_value, _change_value); EXPECT_EQ(_old_value, result); - EXPECT_EQ(expected, Atomic::load(&_test_value)); + EXPECT_EQ(expected, AtomicAccess::load(&_test_value)); } void fetch_then_or() { - Atomic::store(&_test_value, _old_value); + AtomicAccess::store(&_test_value, _old_value); T expected = _old_value | _change_value; EXPECT_NE(_old_value, expected); - T result = Atomic::fetch_then_or(&_test_value, _change_value); + T result = AtomicAccess::fetch_then_or(&_test_value, _change_value); EXPECT_EQ(_old_value, result); - EXPECT_EQ(expected, Atomic::load(&_test_value)); + EXPECT_EQ(expected, AtomicAccess::load(&_test_value)); } void fetch_then_xor() { - Atomic::store(&_test_value, _old_value); + AtomicAccess::store(&_test_value, _old_value); T expected = _old_value ^ _change_value; EXPECT_NE(_old_value, expected); - T result = Atomic::fetch_then_xor(&_test_value, _change_value); + T result = AtomicAccess::fetch_then_xor(&_test_value, _change_value); EXPECT_EQ(_old_value, result); - EXPECT_EQ(expected, Atomic::load(&_test_value)); + EXPECT_EQ(expected, AtomicAccess::load(&_test_value)); } void and_then_fetch() { - Atomic::store(&_test_value, _old_value); + AtomicAccess::store(&_test_value, _old_value); T expected = _old_value & _change_value; EXPECT_NE(_old_value, expected); - T result = Atomic::and_then_fetch(&_test_value, _change_value); + T result = AtomicAccess::and_then_fetch(&_test_value, _change_value); EXPECT_EQ(expected, result); - EXPECT_EQ(expected, Atomic::load(&_test_value)); + EXPECT_EQ(expected, AtomicAccess::load(&_test_value)); } void or_then_fetch() { - Atomic::store(&_test_value, _old_value); + AtomicAccess::store(&_test_value, _old_value); T expected = _old_value | _change_value; EXPECT_NE(_old_value, expected); - T result = Atomic::or_then_fetch(&_test_value, _change_value); 
+ T result = AtomicAccess::or_then_fetch(&_test_value, _change_value); EXPECT_EQ(expected, result); - EXPECT_EQ(expected, Atomic::load(&_test_value)); + EXPECT_EQ(expected, AtomicAccess::load(&_test_value)); } void xor_then_fetch() { - Atomic::store(&_test_value, _old_value); + AtomicAccess::store(&_test_value, _old_value); T expected = _old_value ^ _change_value; EXPECT_NE(_old_value, expected); - T result = Atomic::xor_then_fetch(&_test_value, _change_value); + T result = AtomicAccess::xor_then_fetch(&_test_value, _change_value); EXPECT_EQ(expected, result); - EXPECT_EQ(expected, Atomic::load(&_test_value)); + EXPECT_EQ(expected, AtomicAccess::load(&_test_value)); } #define TEST_BITOP(name) { SCOPED_TRACE(XSTR(name)); name(); } diff --git a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp index a7d8501ffe58d..927b5ff3d42ae 100644 --- a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp +++ b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp @@ -1197,7 +1197,7 @@ class CHTParallelScanTask: public WorkerTask { void work(uint worker_id) { ChtCountScan par_scan; _scan_task->do_safepoint_scan(par_scan); - Atomic::add(_total_scanned, par_scan._count); + AtomicAccess::add(_total_scanned, par_scan._count); } }; diff --git a/test/hotspot/gtest/utilities/test_globalCounter.cpp b/test/hotspot/gtest/utilities/test_globalCounter.cpp index 34003a05b04bb..217c270537829 100644 --- a/test/hotspot/gtest/utilities/test_globalCounter.cpp +++ b/test/hotspot/gtest/utilities/test_globalCounter.cpp @@ -21,7 +21,7 @@ * questions. */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "threadHelper.inline.hpp" #include "utilities/globalCounter.inline.hpp" @@ -44,14 +44,14 @@ TEST_VM(GlobalCounter, critical_section) { wrt_start.signal(); while (!rt_exit) { GlobalCounter::CSContext cs_context = GlobalCounter::critical_section_begin(current); - volatile TestData* read_test = Atomic::load_acquire(_test); - long value = Atomic::load_acquire(&read_test->test_value); + volatile TestData* read_test = AtomicAccess::load_acquire(_test); + long value = AtomicAccess::load_acquire(&read_test->test_value); ASSERT_EQ(value, good_value); GlobalCounter::critical_section_end(current, cs_context); { GlobalCounter::CriticalSection cs(current); - volatile TestData* test = Atomic::load_acquire(_test); - long value = Atomic::load_acquire(&test->test_value); + volatile TestData* test = AtomicAccess::load_acquire(_test); + long value = AtomicAccess::load_acquire(&test->test_value); ASSERT_EQ(value, good_value); } } @@ -61,7 +61,7 @@ TEST_VM(GlobalCounter, critical_section) { TestData* tmp = new TestData(); tmp->test_value = good_value; - Atomic::release_store(&test, tmp); + AtomicAccess::release_store(&test, tmp); rt_exit = false; ttg.doit(); int nw = number_of_readers; @@ -74,7 +74,7 @@ TEST_VM(GlobalCounter, critical_section) { volatile TestData* free_tmp = test; tmp = new TestData(); tmp->test_value = good_value; - Atomic::release_store(&test, tmp); + AtomicAccess::release_store(&test, tmp); GlobalCounter::write_synchronize(); free_tmp->test_value = bad_value; delete free_tmp; diff --git a/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp b/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp index 252edc63094b7..f5cd67d39f01f 100644 --- a/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp +++ b/test/hotspot/gtest/utilities/test_globalCounter_nested.cpp @@ -21,7 +21,7 @@ * 
questions. */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/os.hpp" #include "utilities/globalCounter.inline.hpp" #include "utilities/spinYield.hpp" @@ -52,21 +52,21 @@ class RCUNestedThread : public JavaTestThread { ~RCUNestedThread() {} void set_state(NestedTestState new_state) { - Atomic::release_store(&_state, new_state); + AtomicAccess::release_store(&_state, new_state); } void wait_with_state(NestedTestState new_state) { SpinYield spinner; - Atomic::release_store(&_state, new_state); - while (!Atomic::load_acquire(&_proceed)) { + AtomicAccess::release_store(&_state, new_state); + while (!AtomicAccess::load_acquire(&_proceed)) { spinner.wait(); } - Atomic::release_store(&_proceed, false); + AtomicAccess::release_store(&_proceed, false); } public: NestedTestState state() const { - return Atomic::load_acquire(&_state); + return AtomicAccess::load_acquire(&_state); } void wait_for_state(NestedTestState goal) { @@ -77,7 +77,7 @@ class RCUNestedThread : public JavaTestThread { } void proceed() { - Atomic::release_store(&_proceed, true); + AtomicAccess::release_store(&_proceed, true); } }; diff --git a/test/hotspot/gtest/utilities/test_lockFreeStack.cpp b/test/hotspot/gtest/utilities/test_lockFreeStack.cpp index 50d862859ac07..3a9d24ad61ec6 100644 --- a/test/hotspot/gtest/utilities/test_lockFreeStack.cpp +++ b/test/hotspot/gtest/utilities/test_lockFreeStack.cpp @@ -22,7 +22,7 @@ */ #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/lockFreeStack.hpp" #include "threadHelper.inline.hpp" @@ -225,21 +225,21 @@ class LockFreeStackTestThread : public JavaTestThread { {} virtual void main_run() { - Atomic::release_store_fence(&_ready, true); + AtomicAccess::release_store_fence(&_ready, true); while (true) { Element* e = _from->pop(); if (e != nullptr) { _to->push(*e); - Atomic::inc(_processed); + AtomicAccess::inc(_processed); ++_local_processed; - } else if (Atomic::load_acquire(_processed) == _process_limit) { + } else if (AtomicAccess::load_acquire(_processed) == _process_limit) { tty->print_cr("thread %u processed %zu", _id, _local_processed); return; } } } - bool ready() const { return Atomic::load_acquire(&_ready); } + bool ready() const { return AtomicAccess::load_acquire(&_ready); } }; TEST_VM(LockFreeStackTest, stress) { diff --git a/test/hotspot/gtest/utilities/test_nonblockingQueue.cpp b/test/hotspot/gtest/utilities/test_nonblockingQueue.cpp index 7871e3038a98b..ae299730f6e1a 100644 --- a/test/hotspot/gtest/utilities/test_nonblockingQueue.cpp +++ b/test/hotspot/gtest/utilities/test_nonblockingQueue.cpp @@ -22,7 +22,7 @@ */ #include "memory/allocation.inline.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/nonblockingQueue.inline.hpp" #include "utilities/pair.hpp" @@ -202,21 +202,21 @@ class NonblockingQueueTestThread : public JavaTestThread { {} virtual void main_run() { - Atomic::release_store_fence(&_ready, true); + AtomicAccess::release_store_fence(&_ready, true); while (true) { Element* e = _from->pop(); if (e != nullptr) { _to->push(*e); - Atomic::inc(_processed); + AtomicAccess::inc(_processed); ++_local_processed; - } else if (Atomic::load_acquire(_processed) == _process_limit) { + } else if (AtomicAccess::load_acquire(_processed) == _process_limit) { tty->print_cr("thread %u processed %zu", _id, _local_processed); return; } } } - bool 
ready() const { return Atomic::load_acquire(&_ready); } + bool ready() const { return AtomicAccess::load_acquire(&_ready); } }; TEST_VM(NonblockingQueueTest, stress) { diff --git a/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp b/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp index faa3b001c3a95..323908ebd121b 100644 --- a/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp +++ b/test/hotspot/gtest/utilities/test_singleWriterSynchronizer.cpp @@ -22,7 +22,7 @@ * */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/os.hpp" #include "runtime/thread.hpp" @@ -55,14 +55,14 @@ class SingleWriterSynchronizerTestReader : public JavaTestThread { virtual void main_run() { size_t iterations = 0; size_t values_changed = 0; - while (Atomic::load_acquire(_continue_running) != 0) { + while (AtomicAccess::load_acquire(_continue_running) != 0) { { ThreadBlockInVM tbiv(this); } // Safepoint check outside critical section. ++iterations; SingleWriterSynchronizer::CriticalSection cs(_synchronizer); - uintx value = Atomic::load_acquire(_synchronized_value); + uintx value = AtomicAccess::load_acquire(_synchronized_value); uintx new_value = value; for (uint i = 0; i < reader_iterations; ++i) { - new_value = Atomic::load_acquire(_synchronized_value); + new_value = AtomicAccess::load_acquire(_synchronized_value); // A reader can see either the value it first read after // entering the critical section, or that value + 1. No other // values are possible. @@ -96,7 +96,7 @@ class SingleWriterSynchronizerTestWriter : public JavaTestThread { {} virtual void main_run() { - while (Atomic::load_acquire(_continue_running) != 0) { + while (AtomicAccess::load_acquire(_continue_running) != 0) { ++*_synchronized_value; _synchronizer->synchronize(); { ThreadBlockInVM tbiv(this); } // Safepoint check. diff --git a/test/hotspot/gtest/utilities/test_waitBarrier.cpp b/test/hotspot/gtest/utilities/test_waitBarrier.cpp index 6f24299ca2632..0b4a9bd8d2e0d 100644 --- a/test/hotspot/gtest/utilities/test_waitBarrier.cpp +++ b/test/hotspot/gtest/utilities/test_waitBarrier.cpp @@ -21,7 +21,7 @@ * questions. */ -#include "runtime/atomic.hpp" +#include "runtime/atomicAccess.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "utilities/spinYield.hpp" @@ -48,9 +48,9 @@ class WBThread : public JavaTestThread { // Similar to how a JavaThread would stop in a safepoint. while (!_exit) { // Load the published tag. - tag = Atomic::load_acquire(&wait_tag); + tag = AtomicAccess::load_acquire(&wait_tag); // Publish the tag this thread is going to wait for. - Atomic::release_store(&_on_barrier, tag); + AtomicAccess::release_store(&_on_barrier, tag); if (_on_barrier == 0) { SpinPause(); continue; @@ -59,9 +59,9 @@ class WBThread : public JavaTestThread { // Wait until we are woken. _wait_barrier->wait(tag); // Verify that we do not see an invalid value. - vv = Atomic::load_acquire(&valid_value); + vv = AtomicAccess::load_acquire(&valid_value); ASSERT_EQ((vv & 0x1), 0); - Atomic::release_store(&_on_barrier, 0); + AtomicAccess::release_store(&_on_barrier, 0); } } }; @@ -103,7 +103,7 @@ class WBArmerThread : public JavaTestThread { // Arm next tag. wb.arm(next_tag); // Publish tag. - Atomic::release_store_fence(&wait_tag, next_tag); + AtomicAccess::release_store_fence(&wait_tag, next_tag); // Wait until threads picked up new tag. 
while (reader1->_on_barrier != wait_tag || @@ -114,12 +114,12 @@ class WBArmerThread : public JavaTestThread { } // Set an invalid value. - Atomic::release_store(&valid_value, valid_value + 1); // odd + AtomicAccess::release_store(&valid_value, valid_value + 1); // odd os::naked_yield(); // Set a valid value. - Atomic::release_store(&valid_value, valid_value + 1); // even + AtomicAccess::release_store(&valid_value, valid_value + 1); // even // Publish inactive tag. - Atomic::release_store_fence(&wait_tag, 0); // Stores in WB must not float up. + AtomicAccess::release_store_fence(&wait_tag, 0); // Stores in WB must not float up. wb.disarm(); // Wait until threads done valid_value verification.
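
The hunks above repeat one idiom throughout: read shared state with AtomicAccess::load_acquire, attempt an update with AtomicAccess::cmpxchg (which returns the previously observed value), retry on contention, and publish results with AtomicAccess::release_store. A minimal standalone sketch of that retry loop follows, assuming a hypothetical _pending counter that is not part of the patched files:

#include "runtime/atomicAccess.hpp"

// Hypothetical counter used only for illustration; not part of the patched files.
static volatile int _pending = 0;

// Take one unit from _pending unless it is already zero; returns the value
// observed before the update. Retries while other threads win the race.
static int take_one_if_available() {
  while (true) {
    int cur = AtomicAccess::load_acquire(&_pending);
    if (cur == 0) {
      return 0;  // Nothing outstanding.
    }
    // cmpxchg(dest, compare_value, exchange_value) returns the prior value;
    // equality with compare_value means our update was installed.
    int prev = AtomicAccess::cmpxchg(&_pending, cur, cur - 1);
    if (prev == cur) {
      return cur;  // Successfully decremented.
    }
    // Contention: another thread changed _pending; loop and retry with a fresh value.
  }
}

Success is detected by comparing cmpxchg's return value with the compare argument, the same check used in GenericWaitBarrier::Cell::signal_if_needed and the nmethod guard updates earlier in this patch.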