Skip to content
Browse files
8253464: ARM32 Zero: atomic_copy64 is incorrect, breaking volatile stores

Reviewed-by: aph
  • Loading branch information
shipilev committed Sep 23, 2020
1 parent 0bc01da commit c21690b518c40e7f6674eb5a1a86561335173c3c
Showing with 17 additions and 9 deletions.
  1. +17 −9 src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp
@@ -68,15 +68,23 @@
: "=&f"(tmp), "=Q"(*(volatile double*)dst)
: "Q"(*(volatile double*)src));
#elif defined(__ARM_ARCH_7A__)
// Note that a ldrexd + clrex combination is only needed for
// correctness on the OS level (context-switches). In this
// case, clrex *may* be beneficial for performance. For now
// don't bother with clrex as this is Zero.
jlong tmp;
asm volatile ("ldrexd %0, [%1]\n"
: "=r"(tmp)
: "r"(src), "m"(src));
*(jlong *) dst = tmp;
// The only way to perform the atomic 64-bit load/store
// is to use ldrexd/strexd for both reads and writes.
// For store, we need to have the matching (fake) load first.
// Put clrex between exclusive ops on src and dst for clarity.
uint64_t tmp_r, tmp_w;
uint32_t flag_w;
asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
              "clrex\n"
              "1:\n"
              "ldrexd %[tmp_w], [%[dst]]\n"
              "strexd %[flag_w], %[tmp_r], [%[dst]]\n"
              "cmp %[flag_w], 0\n"
              "bne 1b\n"
: [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
[flag_w] "=&r" (flag_w)
: [src] "r" (src), [dst] "r" (dst)
: "cc", "memory");
*(jlong *) dst = *(const jlong *) src;

1 comment on commit c21690b


This comment has been minimized.

Copy link

@bridgekeeper bridgekeeper bot commented on c21690b Sep 23, 2020

Please sign in to comment.