From bc6148407e629bd99fa5a8577ebd90320610f349 Mon Sep 17 00:00:00 2001
From: Albert Mingkun Yang <ayang@openjdk.org>
Date: Tue, 15 Feb 2022 12:23:58 +0000
Subject: [PATCH] 8280136: Serial: Remove unnecessary use of ExpandHeap_lock

Reviewed-by: iwalulya, kbarrett, sjohanss
---
 src/hotspot/share/gc/parallel/mutableSpace.cpp    |  2 +-
 src/hotspot/share/gc/parallel/mutableSpace.hpp    |  2 +-
 src/hotspot/share/gc/parallel/psOldGen.cpp        | 15 +++++++--------
 src/hotspot/share/gc/serial/defNewGeneration.cpp  |  1 -
 src/hotspot/share/gc/serial/tenuredGeneration.cpp |  3 +--
 src/hotspot/share/runtime/mutexLocker.cpp         | 10 ++++++++--
 src/hotspot/share/runtime/mutexLocker.hpp         |  4 +++-
 7 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
index e28a71e569125..95bd979bcf143 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -217,7 +217,7 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
 
 // Only used by oldgen allocation.
 bool MutableSpace::needs_expand(size_t word_size) const {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   // Holding the lock means end is stable. So while top may be advancing
   // via concurrent allocations, there is no need to order the reads of top
   // and end here, unlike in cas_allocate.
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp
index 548b6a4949ee7..289d8997f3974 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp
@@ -145,7 +145,7 @@ class MutableSpace: public CHeapObj<mtGC> {
   // Return true if this space needs to be expanded in order to satisfy an
   // allocation request of the indicated size. Concurrent allocations and
   // resizes may change the result of a later call. Used by oldgen allocator.
-  // precondition: holding ExpandHeap_lock
+  // precondition: holding PSOldGenExpand_lock
   bool needs_expand(size_t word_size) const;
 
   // Iteration.
diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
index 591341af8619c..3c01fb9eb1712 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -163,7 +163,7 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
   assert(word_size > 0, "allocating zero words?");
   bool result = true;
   {
-    MutexLocker x(ExpandHeap_lock);
+    MutexLocker x(PSOldGenExpand_lock);
     // Avoid "expand storms" by rechecking available space after obtaining
     // the lock, because another thread may have already made sufficient
    // space available. If insufficient space available, that will remain
@@ -181,7 +181,7 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
 }
 
 bool PSOldGen::expand(size_t bytes) {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);
   assert(bytes > 0, "precondition");
   const size_t alignment = virtual_space()->alignment();
@@ -219,7 +219,7 @@ bool PSOldGen::expand(size_t bytes) {
 }
 
 bool PSOldGen::expand_by(size_t bytes) {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);
   assert(bytes > 0, "precondition");
   bool result = virtual_space()->expand_by(bytes);
@@ -255,7 +255,7 @@ bool PSOldGen::expand_by(size_t bytes) {
 }
 
 bool PSOldGen::expand_to_reserved() {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);
 
   bool result = false;
@@ -268,12 +268,11 @@ bool PSOldGen::expand_to_reserved() {
 }
 
 void PSOldGen::shrink(size_t bytes) {
-  assert_lock_strong(ExpandHeap_lock);
+  assert_lock_strong(PSOldGenExpand_lock);
   assert_locked_or_safepoint(Heap_lock);
 
   size_t size = align_down(bytes, virtual_space()->alignment());
   if (size > 0) {
-    assert_lock_strong(ExpandHeap_lock);
     virtual_space()->shrink_by(bytes);
     post_resize();
 
@@ -312,11 +311,11 @@ void PSOldGen::resize(size_t desired_free_space) {
   }
   if (new_size > current_size) {
     size_t change_bytes = new_size - current_size;
-    MutexLocker x(ExpandHeap_lock);
+    MutexLocker x(PSOldGenExpand_lock);
     expand(change_bytes);
   } else {
     size_t change_bytes = current_size - new_size;
-    MutexLocker x(ExpandHeap_lock);
+    MutexLocker x(PSOldGenExpand_lock);
     shrink(change_bytes);
   }
 
diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp
index dac84ffb78ad7..faec1096cfd05 100644
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp
@@ -288,7 +288,6 @@ void DefNewGeneration::swap_spaces() {
 }
 
 bool DefNewGeneration::expand(size_t bytes) {
-  MutexLocker x(ExpandHeap_lock);
   HeapWord* prev_high = (HeapWord*) _virtual_space.high();
   bool success = _virtual_space.expand_by(bytes);
   if (success && ZapUnusedHeapArea) {
diff --git a/src/hotspot/share/gc/serial/tenuredGeneration.cpp b/src/hotspot/share/gc/serial/tenuredGeneration.cpp
index fd135529157d1..acf19426a9572 100644
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp
@@ -196,7 +196,6 @@ TenuredGeneration::expand_and_allocate(size_t word_size, bool is_tlab) {
 }
 
 bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
-  GCMutexLocker x(ExpandHeap_lock);
   return CardGeneration::expand(bytes, expand_bytes);
 }
 
@@ -209,7 +208,7 @@ size_t TenuredGeneration::contiguous_available() const {
 }
 
 void TenuredGeneration::assert_correct_size_change_locking() {
-  assert_locked_or_safepoint(ExpandHeap_lock);
+  assert_locked_or_safepoint(Heap_lock);
 }
 
 void TenuredGeneration::object_iterate(ObjectClosure* blk) {
diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp
index 2d5c34cd21563..d4a5d0795c885 100644
--- a/src/hotspot/share/runtime/mutexLocker.cpp
+++ b/src/hotspot/share/runtime/mutexLocker.cpp
@@ -54,7 +54,9 @@ Monitor* JNICritical_lock             = NULL;
 Mutex*   JvmtiThreadState_lock        = NULL;
 Monitor* EscapeBarrier_lock           = NULL;
 Monitor* Heap_lock                    = NULL;
-Mutex*   ExpandHeap_lock              = NULL;
+#if INCLUDE_PARALLELGC
+Mutex*   PSOldGenExpand_lock          = NULL;
+#endif
 Mutex*   AdapterHandlerLibrary_lock   = NULL;
 Mutex*   SignatureHandlerLibrary_lock = NULL;
 Mutex*   VtableStubs_lock             = NULL;
@@ -358,7 +360,11 @@ void mutex_init() {
     defl(G1OldGCCount_lock           , PaddedMonitor, Threads_lock, true);
   }
   defl(CompileTaskAlloc_lock         , PaddedMutex ,  MethodCompileQueue_lock);
-  defl(ExpandHeap_lock               , PaddedMutex ,  Heap_lock, true);
+#if INCLUDE_PARALLELGC
+  if (UseParallelGC) {
+    defl(PSOldGenExpand_lock         , PaddedMutex ,  Heap_lock, true);
+  }
+#endif
   defl(OopMapCacheAlloc_lock         , PaddedMutex ,  Threads_lock, true);
   defl(Module_lock                   , PaddedMutex ,  ClassLoaderDataGraph_lock);
   defl(SystemDictionary_lock         , PaddedMonitor, Module_lock);
diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp
index cd415e945a578..57e6a3912b516 100644
--- a/src/hotspot/share/runtime/mutexLocker.hpp
+++ b/src/hotspot/share/runtime/mutexLocker.hpp
@@ -46,7 +46,9 @@ extern Monitor* JNICritical_lock;                // a lock used while entering a
 extern Mutex*   JvmtiThreadState_lock;           // a lock on modification of JVMTI thread data
 extern Monitor* EscapeBarrier_lock;              // a lock to sync reallocating and relocking objects because of JVMTI access
 extern Monitor* Heap_lock;                       // a lock on the heap
-extern Mutex*   ExpandHeap_lock;                 // a lock on expanding the heap
+#if INCLUDE_PARALLELGC
+extern Mutex*   PSOldGenExpand_lock;             // a lock on expanding the heap
+#endif
 extern Mutex*   AdapterHandlerLibrary_lock;      // a lock on the AdapterHandlerLibrary
 extern Mutex*   SignatureHandlerLibrary_lock;    // a lock on the SignatureHandlerLibrary
 extern Mutex*   VtableStubs_lock;                // a lock on the VtableStubs
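
Not part of the patch: a minimal standalone sketch of the recheck-under-lock pattern that
PSOldGenExpand_lock serves in PSOldGen::expand_for_allocate() above. Everything here is
hypothetical illustration: std::mutex stands in for HotSpot's MutexLocker, and ToyOldGen,
_available_words and expand() are made-up stand-ins for PSOldGen internals.

// sketch.cpp - illustrates "expand storm" avoidance; assumptions as stated above.
#include <cstddef>
#include <mutex>

class ToyOldGen {
  std::mutex _expand_lock;         // plays the role of PSOldGenExpand_lock
  size_t     _available_words = 0; // words currently free for allocation

  // Grow the generation by at least 'word_size' words (stubbed out here).
  void expand(size_t word_size) { _available_words += word_size; }

public:
  // Mirrors the shape of PSOldGen::expand_for_allocate(): take the expand
  // lock, then re-check free space, because another thread may already have
  // expanded while we waited; expanding again would needlessly grow the heap
  // (the "expand storm" the patch comment refers to).
  bool expand_for_allocate(size_t word_size) {
    std::lock_guard<std::mutex> x(_expand_lock);
    if (_available_words < word_size) {
      expand(word_size);
    }
    return _available_words >= word_size;
  }
};

The same shape explains why Serial no longer needs a lock at all: its expansion happens
only at a safepoint or under Heap_lock, so there is no concurrent expander to race with,
while Parallel's old gen can be expanded by concurrently allocating mutator threads.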