8261649: AArch64: Optimize LSE atomics in C++ code
Reviewed-by: adinn
Andrew Haley committed Feb 19, 2021
1 parent 61820b7 commit 1b0c36b
Showing 4 changed files with 240 additions and 115 deletions.
3 changes: 3 additions & 0 deletions src/hotspot/cpu/aarch64/atomic_aarch64.hpp
@@ -42,5 +42,8 @@ extern aarch64_atomic_stub_t aarch64_atomic_xchg_8_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_1_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_1_relaxed_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_relaxed_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_relaxed_impl;

#endif // CPU_AARCH64_ATOMIC_AARCH64_HPP
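
The new declarations follow the existing stub-pointer pattern: each aarch64_atomic_*_impl is a function pointer that initially points at a hand-written default in atomic_aarch64.S and is later repointed at generated LSE code. A minimal sketch of that pattern, using the three-argument signature implied by the *_default_impl declarations later in this commit (the example_* names are hypothetical, for illustration only):

typedef uint64_t (*aarch64_atomic_stub_t)(volatile void *ptr,
                                          uint64_t arg1, uint64_t arg2);

// Fallback implementation, used until an LSE version is generated.
extern "C" uint64_t example_cmpxchg_4_relaxed_default_impl(volatile void *ptr,
                                                           uint64_t compare_val,
                                                           uint64_t exchange_val);

// The pointer callers dispatch through; generate_atomic_entry_points()
// repoints it at the freshly generated LSE stub once the code is published.
aarch64_atomic_stub_t example_cmpxchg_4_relaxed_impl =
    example_cmpxchg_4_relaxed_default_impl;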
237 changes: 159 additions & 78 deletions src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -5574,87 +5574,167 @@ class StubGenerator: public StubCodeGenerator {
}

#ifdef LINUX

// ARMv8.1 LSE versions of the atomic stubs used by Atomic::PlatformXX.
//
// If LSE is in use, generate LSE versions of all the stubs. The
// non-LSE versions are in atomic_aarch64.S.
// class AtomicStubMark records the entry point of a stub and the
// stub pointer which will point to it. The stub pointer is set to
// the entry point when ~AtomicStubMark() is called, which must be
// after ICache::invalidate_range. This ensures safe publication of
// the generated code.
class AtomicStubMark {
  address _entry_point;
  aarch64_atomic_stub_t *_stub;
  MacroAssembler *_masm;
public:
  AtomicStubMark(MacroAssembler *masm, aarch64_atomic_stub_t *stub) {
    _masm = masm;
    __ align(32);
    _entry_point = __ pc();
    _stub = stub;
  }
  ~AtomicStubMark() {
    *_stub = (aarch64_atomic_stub_t)_entry_point;
  }
};

// NB: For memory_order_conservative we need a trailing membar after
// LSE atomic operations but not a leading membar.
//
// We don't need a leading membar because a clause in the Arm ARM
// says:
//
//   Barrier-ordered-before
//
//   Barrier instructions order prior Memory effects before subsequent
//   Memory effects generated by the same Observer. A read or a write
//   RW1 is Barrier-ordered-before a read or a write RW2 from the same
//   Observer if and only if RW1 appears in program order before RW2
//   and [ ... ] at least one of RW1 and RW2 is generated by an atomic
//   instruction with both Acquire and Release semantics.
//
// All the atomic instructions {ldaddal, swapal, casal} have Acquire
// and Release semantics, therefore we don't need a leading
// barrier. However, there is no corresponding Barrier-ordered-after
// relationship, therefore we need a trailing membar to prevent a
// later store or load from being reordered with the store in an
// atomic instruction.
//
// This was checked by using the herd7 consistency model simulator
// (http://diy.inria.fr/) with this test case:
//
//   AArch64 LseCas
//   { 0:X1=x; 0:X2=y; 1:X1=x; 1:X2=y; }
//   P0           | P1;
//   LDR W4, [X2] | MOV W3, #0;
//   DMB LD       | MOV W4, #1;
//   LDR W3, [X1] | CASAL W3, W4, [X1];
//                | DMB ISH;
//                | STR W4, [X2];
//   exists
//   (0:X3=0 /\ 0:X4=1)
//
// If X3 == 0 && X4 == 1, the store to y in P1 has been reordered
// with the store to x in P1. Without the DMB in P1 this may happen.
//
// At the time of writing we don't know of any AArch64 hardware that
// reorders stores in this way, but the Reference Manual permits it.

void gen_cas_entry(Assembler::operand_size size,
                   atomic_memory_order order) {
  Register prev = r3, ptr = c_rarg0, compare_val = c_rarg1,
    exchange_val = c_rarg2;
  bool acquire, release;
  switch (order) {
    case memory_order_relaxed:
      acquire = false;
      release = false;
      break;
    default:
      acquire = true;
      release = true;
      break;
  }
  __ mov(prev, compare_val);
  __ lse_cas(prev, exchange_val, ptr, size, acquire, release, /*not_pair*/true);
  if (order == memory_order_conservative) {
    __ membar(Assembler::StoreStore|Assembler::StoreLoad);
  }
  if (size == Assembler::xword) {
    __ mov(r0, prev);
  } else {
    __ movw(r0, prev);
  }
  __ ret(lr);
}

void gen_ldaddal_entry(Assembler::operand_size size) {
  Register prev = r2, addr = c_rarg0, incr = c_rarg1;
  __ ldaddal(size, incr, prev, addr);
  __ membar(Assembler::StoreStore|Assembler::StoreLoad);
  if (size == Assembler::xword) {
    __ mov(r0, prev);
  } else {
    __ movw(r0, prev);
  }
  __ ret(lr);
}

void gen_swpal_entry(Assembler::operand_size size) {
  Register prev = r2, addr = c_rarg0, incr = c_rarg1;
  __ swpal(size, incr, prev, addr);
  __ membar(Assembler::StoreStore|Assembler::StoreLoad);
  if (size == Assembler::xword) {
    __ mov(r0, prev);
  } else {
    __ movw(r0, prev);
  }
  __ ret(lr);
}

void generate_atomic_entry_points() {
  if (! UseLSE) {
    return;
  }

  __ align(CodeEntryAlignment);
  StubCodeMark mark(this, "StubRoutines", "atomic entry points");
  address first_entry = __ pc();

  // All memory_order_conservative
  AtomicStubMark mark_fetch_add_4(_masm, &aarch64_atomic_fetch_add_4_impl);
  gen_ldaddal_entry(Assembler::word);
  AtomicStubMark mark_fetch_add_8(_masm, &aarch64_atomic_fetch_add_8_impl);
  gen_ldaddal_entry(Assembler::xword);

  AtomicStubMark mark_xchg_4(_masm, &aarch64_atomic_xchg_4_impl);
  gen_swpal_entry(Assembler::word);
  AtomicStubMark mark_xchg_8_impl(_masm, &aarch64_atomic_xchg_8_impl);
  gen_swpal_entry(Assembler::xword);

  // CAS, memory_order_conservative
  AtomicStubMark mark_cmpxchg_1(_masm, &aarch64_atomic_cmpxchg_1_impl);
  gen_cas_entry(MacroAssembler::byte, memory_order_conservative);
  AtomicStubMark mark_cmpxchg_4(_masm, &aarch64_atomic_cmpxchg_4_impl);
  gen_cas_entry(MacroAssembler::word, memory_order_conservative);
  AtomicStubMark mark_cmpxchg_8(_masm, &aarch64_atomic_cmpxchg_8_impl);
  gen_cas_entry(MacroAssembler::xword, memory_order_conservative);

  // CAS, memory_order_relaxed
  AtomicStubMark mark_cmpxchg_1_relaxed
    (_masm, &aarch64_atomic_cmpxchg_1_relaxed_impl);
  gen_cas_entry(MacroAssembler::byte, memory_order_relaxed);
  AtomicStubMark mark_cmpxchg_4_relaxed
    (_masm, &aarch64_atomic_cmpxchg_4_relaxed_impl);
  gen_cas_entry(MacroAssembler::word, memory_order_relaxed);
  AtomicStubMark mark_cmpxchg_8_relaxed
    (_masm, &aarch64_atomic_cmpxchg_8_relaxed_impl);
  gen_cas_entry(MacroAssembler::xword, memory_order_relaxed);

  ICache::invalidate_range(first_entry, __ pc() - first_entry);
}
#endif // LINUX
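
The conservative/relaxed split above can be restated in portable terms. The following is a rough C++ sketch of the contract the two CAS flavours aim for, an illustration only and not HotSpot code: memory_order_conservative is approximated by a seq_cst CAS plus a trailing fence, mirroring the CASAL plus trailing membar that gen_cas_entry emits, while relaxed is a plain CAS with no ordering.

#include <atomic>
#include <cstdint>

// Conservative CAS: full two-way ordering, hence the trailing fence,
// analogous to the trailing membar after CASAL in gen_cas_entry().
inline uint64_t cas_conservative_sketch(std::atomic<uint64_t> *p,
                                        uint64_t compare_val,
                                        uint64_t exchange_val) {
  p->compare_exchange_strong(compare_val, exchange_val,
                             std::memory_order_seq_cst);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return compare_val;  // previous value, as the stubs return in r0
}

// Relaxed CAS: atomicity only, no ordering and no trailing fence,
// matching the new *_relaxed stubs.
inline uint64_t cas_relaxed_sketch(std::atomic<uint64_t> *p,
                                   uint64_t compare_val,
                                   uint64_t exchange_val) {
  p->compare_exchange_strong(compare_val, exchange_val,
                             std::memory_order_relaxed);
  return compare_val;
}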

@@ -6772,9 +6852,7 @@ class StubGenerator: public StubCodeGenerator {

#ifdef LINUX

generate_atomic_entry_points();

#endif // LINUX

@@ -6805,19 +6883,22 @@ void StubGenerator_generate(CodeBuffer* code, bool all) {
// Define pointers to atomic stubs and initialize them to point to the
// code in atomic_aarch64.S.

#define DEFAULT_ATOMIC_OP(OPNAME, SIZE, RELAXED)                        \
  extern "C" uint64_t aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl \
    (volatile void *ptr, uint64_t arg1, uint64_t arg2);                 \
  aarch64_atomic_stub_t aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _impl \
    = aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl;

DEFAULT_ATOMIC_OP(fetch_add, 4, )
DEFAULT_ATOMIC_OP(fetch_add, 8, )
DEFAULT_ATOMIC_OP(xchg, 4, )
DEFAULT_ATOMIC_OP(xchg, 8, )
DEFAULT_ATOMIC_OP(cmpxchg, 1, )
DEFAULT_ATOMIC_OP(cmpxchg, 4, )
DEFAULT_ATOMIC_OP(cmpxchg, 8, )
DEFAULT_ATOMIC_OP(cmpxchg, 1, _relaxed)
DEFAULT_ATOMIC_OP(cmpxchg, 4, _relaxed)
DEFAULT_ATOMIC_OP(cmpxchg, 8, _relaxed)

#undef DEFAULT_ATOMIC_OP
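
For one of the new invocations, DEFAULT_ATOMIC_OP(cmpxchg, 4, _relaxed), the token pasting above expands to the following pair of declarations (shown here only to illustrate how the RELAXED suffix is spliced into the names):

extern "C" uint64_t aarch64_atomic_cmpxchg_4_relaxed_default_impl
  (volatile void *ptr, uint64_t arg1, uint64_t arg2);
aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_relaxed_impl
  = aarch64_atomic_cmpxchg_4_relaxed_default_impl;

With an empty RELAXED argument the paste simply disappears, yielding the existing names such as aarch64_atomic_cmpxchg_4_impl.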

