Skip to content

Commit 9e843f5

Browse files
author
Kim Barrett
committed
8367014: Rename class Atomic to AtomicAccess
Reviewed-by: dholmes, aph, stefank
1 parent 5abd184 commit 9e843f5

File tree

428 files changed

+2554
-2552
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

428 files changed

+2554
-2552
lines changed

src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ address BarrierSetAssembler::patching_epoch_addr() {
275275
}
276276

277277
void BarrierSetAssembler::increment_patching_epoch() {
278-
Atomic::inc(&_patching_epoch);
278+
AtomicAccess::inc(&_patching_epoch);
279279
}
280280

281281
void BarrierSetAssembler::clear_patching_epoch() {

src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -112,22 +112,22 @@ class NativeNMethodBarrier {
112112
}
113113

114114
int get_value() {
115-
return Atomic::load_acquire(guard_addr());
115+
return AtomicAccess::load_acquire(guard_addr());
116116
}
117117

118118
void set_value(int value, int bit_mask) {
119119
if (bit_mask == ~0) {
120-
Atomic::release_store(guard_addr(), value);
120+
AtomicAccess::release_store(guard_addr(), value);
121121
return;
122122
}
123123
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
124124
value &= bit_mask;
125-
int old_value = Atomic::load(guard_addr());
125+
int old_value = AtomicAccess::load(guard_addr());
126126
while (true) {
127127
// Only bits in the mask are changed
128128
int new_value = value | (old_value & ~bit_mask);
129129
if (new_value == old_value) break;
130-
int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
130+
int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
131131
if (v == old_value) break;
132132
old_value = v;
133133
}

src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@
4242
#include "prims/methodHandles.hpp"
4343
#include "prims/upcallLinker.hpp"
4444
#include "runtime/arguments.hpp"
45-
#include "runtime/atomic.hpp"
45+
#include "runtime/atomicAccess.hpp"
4646
#include "runtime/continuation.hpp"
4747
#include "runtime/continuationEntry.inline.hpp"
4848
#include "runtime/frame.inline.hpp"
@@ -10265,7 +10265,7 @@ class StubGenerator: public StubCodeGenerator {
1026510265

1026610266
#if defined (LINUX) && !defined (__ARM_FEATURE_ATOMICS)
1026710267

10268-
// ARMv8.1 LSE versions of the atomic stubs used by Atomic::PlatformXX.
10268+
// ARMv8.1 LSE versions of the atomic stubs used by AtomicAccess::PlatformXX.
1026910269
//
1027010270
// If LSE is in use, generate LSE versions of all the stubs. The
1027110271
// non-LSE versions are in atomic_aarch64.S.

src/hotspot/cpu/arm/gc/shared/barrierSetNMethod_arm.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,22 +48,22 @@ class NativeNMethodBarrier: public NativeInstruction {
4848

4949
public:
5050
int get_value() {
51-
return Atomic::load_acquire(guard_addr());
51+
return AtomicAccess::load_acquire(guard_addr());
5252
}
5353

5454
void set_value(int value, int bit_mask) {
5555
if (bit_mask == ~0) {
56-
Atomic::release_store(guard_addr(), value);
56+
AtomicAccess::release_store(guard_addr(), value);
5757
return;
5858
}
5959
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
6060
value &= bit_mask;
61-
int old_value = Atomic::load(guard_addr());
61+
int old_value = AtomicAccess::load(guard_addr());
6262
while (true) {
6363
// Only bits in the mask are changed
6464
int new_value = value | (old_value & ~bit_mask);
6565
if (new_value == old_value) break;
66-
int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
66+
int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
6767
if (v == old_value) break;
6868
old_value = v;
6969
}

src/hotspot/cpu/arm/stubGenerator_arm.cpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -421,7 +421,8 @@ class StubGenerator: public StubCodeGenerator {
421421
}
422422

423423

424-
// As per atomic.hpp the Atomic read-modify-write operations must be logically implemented as:
424+
// As per atomicAccess.hpp the atomic read-modify-write operations must be
425+
// logically implemented as:
425426
// <fence>; <op>; <membar StoreLoad|StoreStore>
426427
// But for load-linked/store-conditional based systems a fence here simply means
427428
// no load/store can be reordered with respect to the initial load-linked, so we have:
@@ -440,7 +441,7 @@ class StubGenerator: public StubCodeGenerator {
440441
// be removed in the future.
441442

442443
// Implementation of atomic_add(jint add_value, volatile jint* dest)
443-
// used by Atomic::add(volatile jint* dest, jint add_value)
444+
// used by AtomicAccess::add(volatile jint* dest, jint add_value)
444445
//
445446
// Arguments :
446447
//
@@ -492,7 +493,7 @@ class StubGenerator: public StubCodeGenerator {
492493
}
493494

494495
// Implementation of jint atomic_xchg(jint exchange_value, volatile jint* dest)
495-
// used by Atomic::add(volatile jint* dest, jint exchange_value)
496+
// used by AtomicAccess::add(volatile jint* dest, jint exchange_value)
496497
//
497498
// Arguments :
498499
//
@@ -542,7 +543,7 @@ class StubGenerator: public StubCodeGenerator {
542543
}
543544

544545
// Implementation of jint atomic_cmpxchg(jint exchange_value, volatile jint *dest, jint compare_value)
545-
// used by Atomic::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
546+
// used by AtomicAccess::cmpxchg(volatile jint *dest, jint compare_value, jint exchange_value)
546547
//
547548
// Arguments :
548549
//
@@ -582,7 +583,7 @@ class StubGenerator: public StubCodeGenerator {
582583
return start;
583584
}
584585

585-
// Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
586+
// Support for jlong AtomicAccess::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
586587
// reordered before by a wrapper to (jlong compare_value, jlong exchange_value, volatile jlong *dest)
587588
//
588589
// Arguments :

src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -73,15 +73,15 @@ class NativeNMethodBarrier: public NativeInstruction {
7373
u_char buf[NativeMovRegMem::instruction_size];
7474
uint64_t u64;
7575
} new_mov_instr, old_mov_instr;
76-
new_mov_instr.u64 = old_mov_instr.u64 = Atomic::load(instr);
76+
new_mov_instr.u64 = old_mov_instr.u64 = AtomicAccess::load(instr);
7777
while (true) {
7878
// Only bits in the mask are changed
7979
int old_value = nativeMovRegMem_at(old_mov_instr.buf)->offset();
8080
int new_value = value | (old_value & ~bit_mask);
8181
if (new_value == old_value) return; // skip icache flush if nothing changed
8282
nativeMovRegMem_at(new_mov_instr.buf)->set_offset(new_value, false /* no icache flush */);
8383
// Swap in the new value
84-
uint64_t v = Atomic::cmpxchg(instr, old_mov_instr.u64, new_mov_instr.u64, memory_order_relaxed);
84+
uint64_t v = AtomicAccess::cmpxchg(instr, old_mov_instr.u64, new_mov_instr.u64, memory_order_relaxed);
8585
if (v == old_mov_instr.u64) break;
8686
old_mov_instr.u64 = v;
8787
}

src/hotspot/cpu/ppc/nativeInst_ppc.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -347,7 +347,7 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
347347
// Finally patch out the jump.
348348
volatile juint *jump_addr = (volatile juint*)instr_addr;
349349
// Release not needed because caller uses invalidate_range after copying the remaining bytes.
350-
//Atomic::release_store(jump_addr, *((juint*)code_buffer));
350+
//AtomicAccess::release_store(jump_addr, *((juint*)code_buffer));
351351
*jump_addr = *((juint*)code_buffer); // atomically store code over branch instruction
352352
ICache::ppc64_flush_icache_bytes(instr_addr, NativeGeneralJump::instruction_size);
353353
}

src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,7 @@ address BarrierSetAssembler::patching_epoch_addr() {
217217
}
218218

219219
void BarrierSetAssembler::increment_patching_epoch() {
220-
Atomic::inc(&_patching_epoch);
220+
AtomicAccess::inc(&_patching_epoch);
221221
}
222222

223223
void BarrierSetAssembler::clear_patching_epoch() {

src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -106,22 +106,22 @@ class NativeNMethodBarrier {
106106
}
107107

108108
int get_value() {
109-
return Atomic::load_acquire(guard_addr());
109+
return AtomicAccess::load_acquire(guard_addr());
110110
}
111111

112112
void set_value(int value, int bit_mask) {
113113
if (bit_mask == ~0) {
114-
Atomic::release_store(guard_addr(), value);
114+
AtomicAccess::release_store(guard_addr(), value);
115115
return;
116116
}
117117
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
118118
value &= bit_mask;
119-
int old_value = Atomic::load(guard_addr());
119+
int old_value = AtomicAccess::load(guard_addr());
120120
while (true) {
121121
// Only bits in the mask are changed
122122
int new_value = value | (old_value & ~bit_mask);
123123
if (new_value == old_value) break;
124-
int v = Atomic::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
124+
int v = AtomicAccess::cmpxchg(guard_addr(), old_value, new_value, memory_order_release);
125125
if (v == old_value) break;
126126
old_value = v;
127127
}

src/hotspot/cpu/s390/gc/shared/barrierSetNMethod_s390.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,12 +64,12 @@ class NativeMethodBarrier: public NativeInstruction {
6464
assert((value & ~bit_mask) == 0, "trying to set bits outside the mask");
6565
value &= bit_mask;
6666
int32_t* data_addr = (int32_t*)get_patchable_data_address();
67-
int old_value = Atomic::load(data_addr);
67+
int old_value = AtomicAccess::load(data_addr);
6868
while (true) {
6969
// Only bits in the mask are changed
7070
int new_value = value | (old_value & ~bit_mask);
7171
if (new_value == old_value) break;
72-
int v = Atomic::cmpxchg(data_addr, old_value, new_value, memory_order_release);
72+
int v = AtomicAccess::cmpxchg(data_addr, old_value, new_value, memory_order_release);
7373
if (v == old_value) break;
7474
old_value = v;
7575
}

0 commit comments

Comments
 (0)