Automatic merge of jdk:master into master
duke committed Jun 29, 2021
2 parents 2056abe + a977157 commit 0383289
Showing 6 changed files with 115 additions and 5 deletions.
4 changes: 4 additions & 0 deletions src/hotspot/cpu/aarch64/atomic_aarch64.hpp
@@ -45,5 +45,9 @@ extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_1_relaxed_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_relaxed_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_relaxed_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_release_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_release_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_4_seq_cst_impl;
extern aarch64_atomic_stub_t aarch64_atomic_cmpxchg_8_seq_cst_impl;

#endif // CPU_AARCH64_ATOMIC_AARCH64_HPP
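
For context, aarch64_atomic_stub_t is the function-pointer type all of these externs share. A minimal sketch of the declaration and a call through one of the new stubs, with the signature assumed from how the assembly stubs consume their arguments (target address, compare value, exchange value; old value returned in x0):

    typedef uint64_t (*aarch64_atomic_stub_t)(volatile void* ptr,
                                              uint64_t arg1, uint64_t arg2);

    // e.g. a 64-bit release CAS dispatched through the new stub pointer:
    // uint64_t old = aarch64_atomic_cmpxchg_8_release_impl(dest, compare, exchange);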
22 changes: 22 additions & 0 deletions src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -5956,6 +5956,10 @@ class StubGenerator: public StubCodeGenerator {
acquire = false;
release = false;
break;
case memory_order_release:
acquire = false;
release = true;
break;
default:
acquire = true;
release = true;
@@ -6037,6 +6041,20 @@ class StubGenerator: public StubCodeGenerator {
(_masm, &aarch64_atomic_cmpxchg_8_relaxed_impl);
gen_cas_entry(MacroAssembler::xword, memory_order_relaxed);

AtomicStubMark mark_cmpxchg_4_release
(_masm, &aarch64_atomic_cmpxchg_4_release_impl);
gen_cas_entry(MacroAssembler::word, memory_order_release);
AtomicStubMark mark_cmpxchg_8_release
(_masm, &aarch64_atomic_cmpxchg_8_release_impl);
gen_cas_entry(MacroAssembler::xword, memory_order_release);

AtomicStubMark mark_cmpxchg_4_seq_cst
(_masm, &aarch64_atomic_cmpxchg_4_seq_cst_impl);
gen_cas_entry(MacroAssembler::word, memory_order_seq_cst);
AtomicStubMark mark_cmpxchg_8_seq_cst
(_masm, &aarch64_atomic_cmpxchg_8_seq_cst_impl);
gen_cas_entry(MacroAssembler::xword, memory_order_seq_cst);

ICache::invalidate_range(first_entry, __ pc() - first_entry);
}
#endif // LINUX
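
For orientation, the release arm added above slots into gen_cas_entry's ordering switch; a hedged reconstruction of the whole switch from the surrounding context is below. The resulting acquire/release flags are handed to the macro assembler's cmpxchg, which can emit either an exclusive load/store loop or an LSE compare-and-swap.

    bool acquire, release;
    switch (order) {
      case memory_order_relaxed:
        acquire = false;
        release = false;
        break;
      case memory_order_release:
        acquire = false;
        release = true;
        break;
      default:  // acq_rel, seq_cst, conservative
        acquire = true;
        release = true;
        break;
    }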
@@ -7203,6 +7221,10 @@ DEFAULT_ATOMIC_OP(cmpxchg, 8, )
DEFAULT_ATOMIC_OP(cmpxchg, 1, _relaxed)
DEFAULT_ATOMIC_OP(cmpxchg, 4, _relaxed)
DEFAULT_ATOMIC_OP(cmpxchg, 8, _relaxed)
DEFAULT_ATOMIC_OP(cmpxchg, 4, _release)
DEFAULT_ATOMIC_OP(cmpxchg, 8, _release)
DEFAULT_ATOMIC_OP(cmpxchg, 4, _seq_cst)
DEFAULT_ATOMIC_OP(cmpxchg, 8, _seq_cst)

#undef DEFAULT_ATOMIC_OP
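
A hedged sketch of what each DEFAULT_ATOMIC_OP line above expands to, inferred from the _default_impl symbols defined in atomic_linux_aarch64.S: each stub pointer starts out bound to the hand-written assembly default, and the stub generator can later repoint it at a generated entry via AtomicStubMark. The exact macro body is an assumption.

    #define DEFAULT_ATOMIC_OP(OPNAME, SIZE, RELAXED)                           \
      extern "C" uint64_t                                                      \
          aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl   \
          (volatile void* ptr, uint64_t arg1, uint64_t arg2);                  \
      aarch64_atomic_stub_t aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _impl \
          = aarch64_atomic_ ## OPNAME ## _ ## SIZE ## RELAXED ## _default_impl;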

33 changes: 29 additions & 4 deletions src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp
@@ -27,6 +27,8 @@
#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP

#include "utilities/debug.hpp"

// Implementation of class atomic
// Note that memory_order_conservative requires a full barrier after atomic stores.
// See https://patchwork.kernel.org/patch/3575821/
@@ -64,17 +64,40 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest,
T exchange_value,
atomic_memory_order order) const {
STATIC_ASSERT(byte_size == sizeof(T));
if (order == memory_order_conservative) {
T value = compare_value;
FULL_MEM_BARRIER;
__atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
__ATOMIC_RELAXED, __ATOMIC_RELAXED);
FULL_MEM_BARRIER;
return value;
} else {
STATIC_ASSERT (
// The modes that align with C++11 are intended to
// follow the same semantics.
memory_order_relaxed == __ATOMIC_RELAXED &&
memory_order_acquire == __ATOMIC_ACQUIRE &&
memory_order_release == __ATOMIC_RELEASE &&
memory_order_acq_rel == __ATOMIC_ACQ_REL &&
memory_order_seq_cst == __ATOMIC_SEQ_CST);

// Some sanity checking on the memory order. It makes no
// sense to have a release operation for a store that never
// happens.
int failure_memory_order;
switch (order) {
case memory_order_release:
failure_memory_order = memory_order_relaxed; break;
case memory_order_acq_rel:
failure_memory_order = memory_order_acquire; break;
default:
failure_memory_order = order;
}
assert(failure_memory_order <= order, "must be");

T value = compare_value;
__atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
order, failure_memory_order);
return value;
}
}
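
To make the failure-order mapping above concrete, here is a minimal stand-alone sketch using plain GCC builtins rather than HotSpot code: a release CAS pairs with a relaxed failure order, since on failure no store, and hence nothing to release, happens.

    #include <stdint.h>

    static int64_t word;

    bool release_cas(int64_t expected, int64_t desired) {
      int64_t observed = expected;
      // Success order RELEASE, failure order RELAXED -- exactly the pair
      // the switch above computes for memory_order_release.
      return __atomic_compare_exchange_n(&word, &observed, desired,
                                         /*weak*/ false,
                                         __ATOMIC_RELEASE, __ATOMIC_RELAXED);
    }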
50 changes: 49 additions & 1 deletion src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
@@ -112,7 +112,55 @@ aarch64_atomic_cmpxchg_8_default_impl:
dmb ish
ret

.globl aarch64_atomic_cmpxchg_4_release_default_impl
.align 5
aarch64_atomic_cmpxchg_4_release_default_impl:
prfm pstl1strm, [x0]
0: ldxr w3, [x0]
cmp w3, w1
b.ne 1f
stlxr w8, w2, [x0]
cbnz w8, 0b
1: mov w0, w3
ret

.globl aarch64_atomic_cmpxchg_8_release_default_impl
.align 5
aarch64_atomic_cmpxchg_8_release_default_impl:
prfm pstl1strm, [x0]
0: ldxr x3, [x0]
cmp x3, x1
b.ne 1f
stlxr w8, x2, [x0]
cbnz w8, 0b
1: mov x0, x3
ret

.globl aarch64_atomic_cmpxchg_4_seq_cst_default_impl
.align 5
aarch64_atomic_cmpxchg_4_seq_cst_default_impl:
prfm pstl1strm, [x0]
0: ldaxr w3, [x0]
cmp w3, w1
b.ne 1f
stlxr w8, w2, [x0]
cbnz w8, 0b
1: mov w0, w3
ret

.globl aarch64_atomic_cmpxchg_8_seq_cst_default_impl
.align 5
aarch64_atomic_cmpxchg_8_seq_cst_default_impl:
prfm pstl1strm, [x0]
0: ldaxr x3, [x0]
cmp x3, x1
b.ne 1f
stlxr w8, x2, [x0]
cbnz w8, 0b
1: mov x0, x3
ret
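
The only difference between the release and seq_cst stubs above is the load: ldxr (plain exclusive) for release versus ldaxr (load-acquire exclusive) for seq_cst; both store with stlxr (store-release exclusive). A hedged C++ equivalent of the 8-byte seq_cst stub, for which a compiler targeting AArch64 without LSE typically emits a comparable ldaxr/stlxr loop (the function name is illustrative):

    #include <stdint.h>

    extern "C" uint64_t cmpxchg_8_seq_cst_sketch(volatile uint64_t* ptr,
                                                 uint64_t compare_value,
                                                 uint64_t exchange_value) {
      uint64_t value = compare_value;
      __atomic_compare_exchange_n(ptr, &value, exchange_value, /*weak*/ false,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return value;  // the old value, as the stubs return it in x0
    }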

.globl aarch64_atomic_cmpxchg_1_relaxed_default_impl
.align 5
aarch64_atomic_cmpxchg_1_relaxed_default_impl:
prfm pstl1strm, [x0]
10 changes: 10 additions & 0 deletions src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp
@@ -151,6 +151,11 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest,
switch (order) {
case memory_order_relaxed:
stub = aarch64_atomic_cmpxchg_4_relaxed_impl; break;
case memory_order_release:
stub = aarch64_atomic_cmpxchg_4_release_impl; break;
case memory_order_acq_rel:
case memory_order_seq_cst:
stub = aarch64_atomic_cmpxchg_4_seq_cst_impl; break;
default:
stub = aarch64_atomic_cmpxchg_4_impl; break;
}
@@ -169,6 +174,11 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
switch (order) {
case memory_order_relaxed:
stub = aarch64_atomic_cmpxchg_8_relaxed_impl; break;
case memory_order_release:
stub = aarch64_atomic_cmpxchg_8_release_impl; break;
case memory_order_acq_rel:
case memory_order_seq_cst:
stub = aarch64_atomic_cmpxchg_8_seq_cst_impl; break;
default:
stub = aarch64_atomic_cmpxchg_8_impl; break;
}
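
Note that memory_order_acq_rel deliberately falls through to the seq_cst stub in both switches: no separate acq_rel entry exists, so the stronger ordering is substituted. The dispatch tail of these operators is truncated above; a hedged caller-side sketch, assuming the Atomic::cmpxchg signature from share/runtime/atomic.hpp and illustrative variables:

    // volatile uint64_t counter; uint64_t expected, desired;
    uint64_t observed = Atomic::cmpxchg(&counter, expected, desired,
                                        memory_order_seq_cst);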
1 change: 1 addition & 0 deletions src/hotspot/share/runtime/atomic.hpp
@@ -47,6 +47,7 @@ enum atomic_memory_order {
memory_order_acquire = 2,
memory_order_release = 3,
memory_order_acq_rel = 4,
memory_order_seq_cst = 5,
// Strong two-way memory barrier.
memory_order_conservative = 8
};
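
The numeric values are not arbitrary: relaxed through seq_cst are chosen to coincide with the GCC/C++11 __ATOMIC_* constants (0, 2, 3, 4, 5), which is what lets the BSD implementation above hand the HotSpot order straight to __atomic_compare_exchange. A hedged restatement of that file's STATIC_ASSERT as stand-alone checks:

    static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "must line up");
    static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "must line up");
    static_assert(memory_order_release == __ATOMIC_RELEASE, "must line up");
    static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "must line up");
    static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "must line up");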
