
Commit fb7713d

8331714: Make OopMapCache installation lock-free
Backport-of: a2584a8341b2dc9c102abd373a890b2108d3f57e
1 parent: a3b14cf

File tree (3 files changed: +9 −11 lines)

src/hotspot/share/oops/instanceKlass.cpp
src/hotspot/share/runtime/mutexLocker.cpp
src/hotspot/share/runtime/mutexLocker.hpp

src/hotspot/share/oops/instanceKlass.cpp

Lines changed: 9 additions & 8 deletions
@@ -1618,16 +1618,17 @@ void InstanceKlass::call_class_initializer(TRAPS) {

 void InstanceKlass::mask_for(const methodHandle& method, int bci,
                              InterpreterOopMap* entry_for) {
-  // Lazily create the _oop_map_cache at first request
-  // Lock-free access requires load_acquire.
+  // Lazily create the _oop_map_cache at first request.
+  // Load_acquire is needed to safely get instance published with CAS by another thread.
   OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache);
   if (oop_map_cache == nullptr) {
-    MutexLocker x(OopMapCacheAlloc_lock);
-    // Check if _oop_map_cache was allocated while we were waiting for this lock
-    if ((oop_map_cache = _oop_map_cache) == nullptr) {
-      oop_map_cache = new OopMapCache();
-      // Ensure _oop_map_cache is stable, since it is examined without a lock
-      Atomic::release_store(&_oop_map_cache, oop_map_cache);
+    // Try to install new instance atomically.
+    oop_map_cache = new OopMapCache();
+    OopMapCache* other = Atomic::cmpxchg(&_oop_map_cache, (OopMapCache*)nullptr, oop_map_cache);
+    if (other != nullptr) {
+      // Someone else managed to install before us, ditch local copy and use the existing one.
+      delete oop_map_cache;
+      oop_map_cache = other;
     }
   }
   // _oop_map_cache is constant after init; lookup below does its own locking.
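
The change replaces the OopMapCacheAlloc_lock-guarded, double-checked allocation with optimistic allocate-then-CAS publication: a thread that finds _oop_map_cache null constructs its own OopMapCache, exactly one racer wins the cmpxchg from nullptr, and any loser deletes its copy and adopts the winner's. Below is a minimal, self-contained sketch of the same pattern using standard C++ atomics rather than HotSpot's Atomic:: API; the Cache type and get_cache() function are hypothetical stand-ins for OopMapCache and InstanceKlass::mask_for(), not code from the commit.

// Sketch only: models the publish protocol from the diff with std::atomic.
#include <atomic>
#include <cstdio>

struct Cache {
  int lookups = 0;   // placeholder payload
};

static std::atomic<Cache*> g_cache{nullptr};

Cache* get_cache() {
  // The acquire load pairs with the CAS below, so a thread that observes a
  // non-null pointer also observes the fully constructed Cache.
  Cache* cache = g_cache.load(std::memory_order_acquire);
  if (cache == nullptr) {
    // Construct optimistically, then try to install with a single CAS.
    // (HotSpot's Atomic::cmpxchg defaults to conservative full-fence ordering;
    // acq_rel/acquire is a portable std::atomic approximation used here.)
    Cache* fresh = new Cache();
    Cache* expected = nullptr;
    if (g_cache.compare_exchange_strong(expected, fresh,
                                        std::memory_order_acq_rel,
                                        std::memory_order_acquire)) {
      cache = fresh;        // won the race: our instance is now published
    } else {
      delete fresh;         // lost the race: discard the local copy
      cache = expected;     // ...and use the instance someone else installed
    }
  }
  return cache;
}

int main() {
  Cache* a = get_cache();
  Cache* b = get_cache();
  std::printf("same instance: %s\n", a == b ? "yes" : "no");
  delete a;                 // single shared instance, freed once at shutdown
  return 0;
}

The trade-off is that a losing thread pays for one throwaway construction and deletion, and in exchange the fast path never takes a lock and the dedicated OopMapCacheAlloc_lock can be removed entirely, which is what the remaining two files in the commit do.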

src/hotspot/share/runtime/mutexLocker.cpp

Lines changed: 0 additions & 2 deletions
@@ -101,7 +101,6 @@ Mutex* tty_lock = nullptr;
 Mutex* RawMonitor_lock = nullptr;
 Mutex* PerfDataMemAlloc_lock = nullptr;
 Mutex* PerfDataManager_lock = nullptr;
-Mutex* OopMapCacheAlloc_lock = nullptr;

 Mutex* FreeList_lock = nullptr;
 Mutex* OldSets_lock = nullptr;
@@ -356,7 +355,6 @@ void mutex_init() {
     MUTEX_DEFL(PSOldGenExpand_lock , PaddedMutex , Heap_lock, true);
   }
 #endif
-  MUTEX_DEFL(OopMapCacheAlloc_lock , PaddedMutex , Threads_lock, true);
   MUTEX_DEFL(Module_lock , PaddedMutex , ClassLoaderDataGraph_lock);
   MUTEX_DEFL(SystemDictionary_lock , PaddedMonitor, Module_lock);
   MUTEX_DEFL(JNICritical_lock , PaddedMonitor, AdapterHandlerLibrary_lock); // used for JNI critical regions

src/hotspot/share/runtime/mutexLocker.hpp

Lines changed: 0 additions & 1 deletion
@@ -97,7 +97,6 @@ extern Mutex* FullGCALot_lock; // a lock to make FullGCALot MT
 extern Mutex* RawMonitor_lock;
 extern Mutex* PerfDataMemAlloc_lock;   // a lock on the allocator for PerfData memory for performance data
 extern Mutex* PerfDataManager_lock;    // a long on access to PerfDataManager resources
-extern Mutex* OopMapCacheAlloc_lock;   // protects allocation of oop_map caches

 extern Mutex* FreeList_lock;           // protects the free region list during safepoints
 extern Mutex* OldSets_lock;            // protects the old region sets
