Skip to content

Commit 355755d

Browse files
toxaartxmas92
and committed
8366671: Refactor Thread::SpinAcquire and Thread::SpinRelease
Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org> Reviewed-by: coleenp, kbarrett, dholmes, aboldtch
1 parent ac81ce5 commit 355755d

File tree

12 files changed

+155
-119
lines changed

12 files changed

+155
-119
lines changed

src/hotspot/share/jfr/recorder/service/jfrEventThrottler.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,11 @@
2424
*/
2525

2626
#include "jfr/recorder/service/jfrEventThrottler.hpp"
27-
#include "jfr/utilities/jfrSpinlockHelper.hpp"
2827
#include "jfrfiles/jfrEventIds.hpp"
2928
#include "logging/log.hpp"
3029
#include "utilities/debug.hpp"
3130
#include "utilities/globalDefinitions.hpp"
31+
#include "utilities/spinCriticalSection.hpp"
3232

3333
constexpr static const JfrSamplerParams _disabled_params = {
3434
0, // sample points per window
@@ -128,7 +128,7 @@ JfrEventThrottler* JfrEventThrottler::create_throttler(JfrEventId id) {
128128
* - period_ms time period expressed in milliseconds
129129
*/
130130
void JfrEventThrottler::configure(int64_t sample_size, int64_t period_ms) {
131-
JfrSpinlockHelper mutex(&_lock);
131+
SpinCriticalSection scs(&_lock);
132132
_sample_size = sample_size;
133133
_period_ms = period_ms;
134134
_update = true;

src/hotspot/share/jfr/support/jfrAdaptiveSampler.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,13 +25,13 @@
2525

2626
#include "jfr/support/jfrAdaptiveSampler.hpp"
2727
#include "jfr/utilities/jfrRandom.inline.hpp"
28-
#include "jfr/utilities/jfrSpinlockHelper.hpp"
2928
#include "jfr/utilities/jfrTime.hpp"
3029
#include "jfr/utilities/jfrTimeConverter.hpp"
3130
#include "jfr/utilities/jfrTryLock.hpp"
3231
#include "logging/log.hpp"
3332
#include "runtime/atomicAccess.hpp"
3433
#include "utilities/globalDefinitions.hpp"
34+
#include "utilities/spinCriticalSection.hpp"
3535

3636
#include <cmath>
3737

@@ -342,7 +342,7 @@ JfrGTestFixedRateSampler::JfrGTestFixedRateSampler(size_t sample_points_per_wind
342342

343343
bool JfrGTestFixedRateSampler::initialize() {
344344
const bool result = JfrAdaptiveSampler::initialize();
345-
JfrSpinlockHelper mutex(&_lock);
345+
SpinCriticalSection scs(&_lock);
346346
reconfigure();
347347
return result;
348348
}

src/hotspot/share/jfr/support/jfrThreadLocal.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@
3636
#include "jfr/recorder/storage/jfrStorage.hpp"
3737
#include "jfr/support/jfrThreadId.inline.hpp"
3838
#include "jfr/support/jfrThreadLocal.hpp"
39-
#include "jfr/utilities/jfrSpinlockHelper.hpp"
4039
#include "jfr/writers/jfrJavaEventWriter.hpp"
4140
#include "logging/log.hpp"
4241
#include "memory/allocation.inline.hpp"

src/hotspot/share/jfr/utilities/jfrSpinlockHelper.hpp

Lines changed: 0 additions & 44 deletions
This file was deleted.

src/hotspot/share/runtime/objectMonitor.cpp

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -59,6 +59,7 @@
5959
#include "utilities/globalDefinitions.hpp"
6060
#include "utilities/macros.hpp"
6161
#include "utilities/preserveException.hpp"
62+
#include "utilities/spinCriticalSection.hpp"
6263
#if INCLUDE_JFR
6364
#include "jfr/support/jfrFlush.hpp"
6465
#endif
@@ -1863,9 +1864,10 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
18631864
// returns because of a timeout or interrupt. Contention is exceptionally rare
18641865
// so we use a simple spin-lock instead of a heavier-weight blocking lock.
18651866

1866-
Thread::SpinAcquire(&_wait_set_lock);
1867-
add_waiter(&node);
1868-
Thread::SpinRelease(&_wait_set_lock);
1867+
{
1868+
SpinCriticalSection scs(&_wait_set_lock);
1869+
add_waiter(&node);
1870+
}
18691871

18701872
intx save = _recursions; // record the old recursion count
18711873
_waiters++; // increment the number of waiters
@@ -1922,12 +1924,11 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
19221924
// That is, we fail toward safety.
19231925

19241926
if (node.TState == ObjectWaiter::TS_WAIT) {
1925-
Thread::SpinAcquire(&_wait_set_lock);
1927+
SpinCriticalSection scs(&_wait_set_lock);
19261928
if (node.TState == ObjectWaiter::TS_WAIT) {
19271929
dequeue_specific_waiter(&node); // unlink from wait_set
19281930
node.TState = ObjectWaiter::TS_RUN;
19291931
}
1930-
Thread::SpinRelease(&_wait_set_lock);
19311932
}
19321933

19331934
// The thread is now either on off-list (TS_RUN),
@@ -2036,7 +2037,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
20362037

20372038
bool ObjectMonitor::notify_internal(JavaThread* current) {
20382039
bool did_notify = false;
2039-
Thread::SpinAcquire(&_wait_set_lock);
2040+
SpinCriticalSection scs(&_wait_set_lock);
20402041
ObjectWaiter* iterator = dequeue_waiter();
20412042
if (iterator != nullptr) {
20422043
guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
@@ -2095,7 +2096,6 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
20952096
}
20962097
}
20972098
}
2098-
Thread::SpinRelease(&_wait_set_lock);
20992099
return did_notify;
21002100
}
21012101

@@ -2198,9 +2198,10 @@ void ObjectMonitor::vthread_wait(JavaThread* current, jlong millis, bool interru
21982198
// returns because of a timeout or interrupt. Contention is exceptionally rare
21992199
// so we use a simple spin-lock instead of a heavier-weight blocking lock.
22002200

2201-
Thread::SpinAcquire(&_wait_set_lock);
2202-
add_waiter(node);
2203-
Thread::SpinRelease(&_wait_set_lock);
2201+
{
2202+
SpinCriticalSection scs(&_wait_set_lock);
2203+
add_waiter(node);
2204+
}
22042205

22052206
node->_recursions = _recursions; // record the old recursion count
22062207
_recursions = 0; // set the recursion level to be 0
@@ -2221,12 +2222,11 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node
22212222
// need to check if we were interrupted or the wait timed-out, and
22222223
// in that case remove ourselves from the _wait_set queue.
22232224
if (node->TState == ObjectWaiter::TS_WAIT) {
2224-
Thread::SpinAcquire(&_wait_set_lock);
2225+
SpinCriticalSection scs(&_wait_set_lock);
22252226
if (node->TState == ObjectWaiter::TS_WAIT) {
22262227
dequeue_specific_waiter(node); // unlink from wait_set
22272228
node->TState = ObjectWaiter::TS_RUN;
22282229
}
2229-
Thread::SpinRelease(&_wait_set_lock);
22302230
}
22312231

22322232
// If this was an interrupted case, set the _interrupted boolean so that

src/hotspot/share/runtime/park.cpp

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
#include "memory/allocation.inline.hpp"
2626
#include "nmt/memTracker.hpp"
2727
#include "runtime/javaThread.hpp"
28+
#include "utilities/spinCriticalSection.hpp"
2829

2930
// Lifecycle management for TSM ParkEvents.
3031
// ParkEvents are type-stable (TSM).
@@ -60,14 +61,13 @@ ParkEvent * ParkEvent::Allocate (Thread * t) {
6061
// Using a spin lock since we are part of the mutex impl.
6162
// 8028280: using concurrent free list without memory management can leak
6263
// pretty badly it turns out.
63-
Thread::SpinAcquire(&ListLock);
6464
{
65+
SpinCriticalSection scs(&ListLock);
6566
ev = FreeList;
6667
if (ev != nullptr) {
6768
FreeList = ev->FreeNext;
6869
}
6970
}
70-
Thread::SpinRelease(&ListLock);
7171

7272
if (ev != nullptr) {
7373
guarantee (ev->AssociatedWith == nullptr, "invariant") ;
@@ -88,12 +88,11 @@ void ParkEvent::Release (ParkEvent * ev) {
8888
ev->AssociatedWith = nullptr ;
8989
// Note that if we didn't have the TSM/immortal constraint, then
9090
// when reattaching we could trim the list.
91-
Thread::SpinAcquire(&ListLock);
9291
{
92+
SpinCriticalSection scs(&ListLock);
9393
ev->FreeNext = FreeList;
9494
FreeList = ev;
9595
}
96-
Thread::SpinRelease(&ListLock);
9796
}
9897

9998
// Override operator new and delete so we can ensure that the

src/hotspot/share/runtime/safepointVerifiers.cpp

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,9 @@
3030

3131
#ifdef ASSERT
3232

33-
NoSafepointVerifier::NoSafepointVerifier(bool active) : _thread(Thread::current()), _active(active) {
33+
NoSafepointVerifier::NoSafepointVerifier(bool active)
34+
: _thread(active ? Thread::current() : nullptr),
35+
_active(active) {
3436
if (!_active) {
3537
return;
3638
}

src/hotspot/share/runtime/thread.cpp

Lines changed: 0 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -566,49 +566,3 @@ bool Thread::set_as_starting_thread(JavaThread* jt) {
566566
DEBUG_ONLY(_starting_thread = jt;)
567567
return os::create_main_thread(jt);
568568
}
569-
570-
// Ad-hoc mutual exclusion primitive: spin lock
571-
//
572-
// We employ a spin lock _only for low-contention, fixed-length
573-
// short-duration critical sections where we're concerned
574-
// about native mutex_t or HotSpot Mutex:: latency.
575-
576-
void Thread::SpinAcquire(volatile int * adr) {
577-
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) {
578-
return; // normal fast-path return
579-
}
580-
581-
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
582-
int ctr = 0;
583-
int Yields = 0;
584-
for (;;) {
585-
while (*adr != 0) {
586-
++ctr;
587-
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
588-
if (Yields > 5) {
589-
os::naked_short_sleep(1);
590-
} else {
591-
os::naked_yield();
592-
++Yields;
593-
}
594-
} else {
595-
SpinPause();
596-
}
597-
}
598-
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return;
599-
}
600-
}
601-
602-
void Thread::SpinRelease(volatile int * adr) {
603-
assert(*adr != 0, "invariant");
604-
// Roach-motel semantics.
605-
// It's safe if subsequent LDs and STs float "up" into the critical section,
606-
// but prior LDs and STs within the critical section can't be allowed
607-
// to reorder or float past the ST that releases the lock.
608-
// Loads and stores in the critical section - which appear in program
609-
// order before the store that releases the lock - must also appear
610-
// before the store that releases the lock in memory visibility order.
611-
// So we need a #loadstore|#storestore "release" memory barrier before
612-
// the ST of 0 into the lock-word which releases the lock.
613-
AtomicAccess::release_store(adr, 0);
614-
}

src/hotspot/share/runtime/thread.hpp

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -602,11 +602,6 @@ class Thread: public ThreadShadow {
602602
jint _hashStateY;
603603
jint _hashStateZ;
604604

605-
// Low-level leaf-lock primitives used to implement synchronization.
606-
// Not for general synchronization use.
607-
static void SpinAcquire(volatile int * Lock);
608-
static void SpinRelease(volatile int * Lock);
609-
610605
#if defined(__APPLE__) && defined(AARCH64)
611606
private:
612607
DEBUG_ONLY(bool _wx_init);
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
/*
2+
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#include "runtime/atomicAccess.hpp"
26+
#include "utilities/spinCriticalSection.hpp"
27+
28+
void SpinCriticalSection::spin_acquire(volatile int* adr) {
29+
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) {
30+
return; // normal fast-path return
31+
}
32+
33+
// Slow-path : We've encountered contention -- Spin/Yield/Block strategy.
34+
int ctr = 0;
35+
int Yields = 0;
36+
for (;;) {
37+
while (*adr != 0) {
38+
++ctr;
39+
if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
40+
if (Yields > 5) {
41+
os::naked_short_sleep(1);
42+
}
43+
else {
44+
os::naked_yield();
45+
++Yields;
46+
}
47+
}
48+
else {
49+
SpinPause();
50+
}
51+
}
52+
if (AtomicAccess::cmpxchg(adr, 0, 1) == 0) return;
53+
}
54+
}
55+
56+
void SpinCriticalSection::spin_release(volatile int* adr) {
57+
assert(*adr != 0, "invariant");
58+
// Roach-motel semantics.
59+
// It's safe if subsequent LDs and STs float "up" into the critical section,
60+
// but prior LDs and STs within the critical section can't be allowed
61+
// to reorder or float past the ST that releases the lock.
62+
// Loads and stores in the critical section - which appear in program
63+
// order before the store that releases the lock - must also appear
64+
// before the store that releases the lock in memory visibility order.
65+
// So we need a #loadstore|#storestore "release" memory barrier before
66+
// the ST of 0 into the lock-word which releases the lock.
67+
AtomicAccess::release_store(adr, 0);
68+
}
69+

0 commit comments

Comments
 (0)