diff --git a/runtime/gc_base/GCExtensions.cpp b/runtime/gc_base/GCExtensions.cpp index d06e5c84e0e..850944aad5c 100644 --- a/runtime/gc_base/GCExtensions.cpp +++ b/runtime/gc_base/GCExtensions.cpp @@ -320,7 +320,7 @@ MM_GCExtensions::releaseNativesForContinuationObject(MM_EnvironmentBase* env, j9 } bool -MM_GCExtensions::needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr) +MM_GCExtensions::needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isGlobalGC) { bool needScan = false; #if JAVA_SPEC_VERSION >= 19 @@ -335,12 +335,14 @@ MM_GCExtensions::needScanStacksForContinuationObject(J9VMThread *vmThread, j9obj * * For fully STW GCs, there is no harm to scan them, but it's a waste of time since they are scanned during root scanning already. * - * We don't scan currently scanned either - one scan is enough. + * We also don't rescan a continuation that is already being scanned by the same type of collector - one scan per collector type is enough. However, a concurrent scavenge (local collector) and + * a concurrent mark (global collector) may overlap; the two are independent, so we handle their scans separately and neither blocks nor ignores the other. + * * we don't scan the continuation object before started and after finished - java stack does not exist. */ if (started && !finished) { Assert_MM_true(NULL != continuation); - needScan = !VM_VMHelpers::isContinuationMountedOrConcurrentlyScanned(continuation); + needScan = !VM_VMHelpers::isContinuationMountedOrConcurrentlyScanned(continuation, isGlobalGC); } #endif /* JAVA_SPEC_VERSION >= 19 */ return needScan; diff --git a/runtime/gc_base/GCExtensions.hpp b/runtime/gc_base/GCExtensions.hpp index 354f8e631f7..e5434fc4d21 100644 --- a/runtime/gc_base/GCExtensions.hpp +++ b/runtime/gc_base/GCExtensions.hpp @@ -303,11 +303,13 @@ class MM_GCExtensions : public MM_GCExtensionsBase { * Check if we need to scan the java stack for the Continuation Object * Used during main scan phase of GC (object graph traversal) or heap object iteration (in sliding compact). * Not meant to be used during root scanning (neither strong roots nor weak roots)! + * * @param[in] vmThread the current J9VMThread * @param[in] continuationObject the continuation object + * @param[in] isGlobalGC true if the check is made on behalf of a global collector, false for a local collector * @return true if we need to scan the java stack */ - static bool needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr); + static bool needScanStacksForContinuationObject(J9VMThread *vmThread, j9object_t objectPtr, bool isGlobalGC); /** * Create a GCExtensions object diff --git a/runtime/gc_glue_java/CompactSchemeFixupObject.cpp b/runtime/gc_glue_java/CompactSchemeFixupObject.cpp index 442a25733fc..0a44ead7770 100644 --- a/runtime/gc_glue_java/CompactSchemeFixupObject.cpp +++ b/runtime/gc_glue_java/CompactSchemeFixupObject.cpp @@ -75,13 +75,15 @@ MM_CompactSchemeFixupObject::fixupContinuationNativeSlots(MM_EnvironmentStandard * mounted Virtual threads later during root fixup, we will skip it during this heap fixup pass * (hence passing true for scanOnlyUnmounted parameter). 
*/ - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = true; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4CompactSchemeFixupObject localData; localData.compactSchemeFixupObject = this; localData.env = env; localData.fromObject = objectPtr; + const bool isConcurrentGC = false; - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCompactScheme, false, false); + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCompactScheme, false, false, isConcurrentGC, isGlobalGC); } } diff --git a/runtime/gc_glue_java/HeapWalkerDelegate.cpp b/runtime/gc_glue_java/HeapWalkerDelegate.cpp index 3d9ad2eaf81..b4e51489781 100644 --- a/runtime/gc_glue_java/HeapWalkerDelegate.cpp +++ b/runtime/gc_glue_java/HeapWalkerDelegate.cpp @@ -60,7 +60,8 @@ MM_HeapWalkerDelegate::doContinuationNativeSlots(MM_EnvironmentBase *env, omrobj { J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread(); - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = true; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4HeapWalker localData; localData.heapWalker = _heapWalker; localData.env = env; @@ -68,6 +69,8 @@ MM_HeapWalkerDelegate::doContinuationNativeSlots(MM_EnvironmentBase *env, omrobj localData.function = function; localData.userData = userData; /* so far there is no case we need ClassWalk for heapwalker, so we set stackFrameClassWalkNeeded = false */ - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForHeapWalker, false, false); + const bool isConcurrentGC = false; + + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForHeapWalker, false, false, isConcurrentGC, isGlobalGC); } } diff --git a/runtime/gc_glue_java/MarkingDelegate.cpp b/runtime/gc_glue_java/MarkingDelegate.cpp index 9478c53c3fa..8820e47ff61 100644 --- a/runtime/gc_glue_java/MarkingDelegate.cpp +++ b/runtime/gc_glue_java/MarkingDelegate.cpp @@ -261,7 +261,8 @@ void MM_MarkingDelegate::scanContinuationNativeSlots(MM_EnvironmentBase *env, omrobjectptr_t objectPtr) { J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread(); - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = true; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4MarkingDelegate localData; localData.markingDelegate = this; localData.env = env; @@ -271,10 +272,10 @@ MM_MarkingDelegate::scanContinuationNativeSlots(MM_EnvironmentBase *env, omrobje #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING) stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled(); #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */ - /* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. 
*/ - bool syncWithContinuationMounting = J9_ARE_ANY_BITS_SET(currentThread->privateFlags, J9_PRIVATE_FLAGS_CONCURRENT_MARK_ACTIVE); - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForMarkingDelegate, stackFrameClassWalkNeeded, false, syncWithContinuationMounting); + bool isConcurrentGC = J9_ARE_ANY_BITS_SET(currentThread->privateFlags, J9_PRIVATE_FLAGS_CONCURRENT_MARK_ACTIVE); + + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForMarkingDelegate, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC); } } diff --git a/runtime/gc_glue_java/MetronomeDelegate.cpp b/runtime/gc_glue_java/MetronomeDelegate.cpp index f57b40790e3..4a7f2709d4d 100644 --- a/runtime/gc_glue_java/MetronomeDelegate.cpp +++ b/runtime/gc_glue_java/MetronomeDelegate.cpp @@ -1647,7 +1647,8 @@ void MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J9Object *objectPtr) { J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread(); - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = true; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4RealtimeMarkingScheme localData; localData.realtimeMarkingScheme = _markingScheme; localData.env = env; @@ -1657,11 +1658,10 @@ MM_MetronomeDelegate::scanContinuationNativeSlots(MM_EnvironmentRealtime *env, J #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING) stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled(); #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */ - /* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */ - bool syncWithContinuationMounting = _realtimeGC->isCollectorConcurrentTracing(); + bool isConcurrentGC = _realtimeGC->isCollectorConcurrentTracing(); - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForRealtimeGC, stackFrameClassWalkNeeded, false, syncWithContinuationMounting); + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForRealtimeGC, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC); } } diff --git a/runtime/gc_glue_java/ScavengerDelegate.cpp b/runtime/gc_glue_java/ScavengerDelegate.cpp index d39e131be92..2b2f6b67225 100644 --- a/runtime/gc_glue_java/ScavengerDelegate.cpp +++ b/runtime/gc_glue_java/ScavengerDelegate.cpp @@ -345,16 +345,17 @@ MM_ScavengerDelegate::scanContinuationNativeSlots(MM_EnvironmentStandard *env, o bool shouldRemember = false; J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread(); - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = false; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4Scavenge localData; localData.scavengerDelegate = this; localData.env = env; localData.reason = reason; localData.shouldRemember = &shouldRemember; /* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. 
*/ - bool syncWithContinuationMounting = _extensions->isConcurrentScavengerInProgress(); + bool isConcurrentGC = _extensions->isConcurrentScavengerInProgress(); - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForScavenge, false, false, syncWithContinuationMounting); + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForScavenge, false, false, isConcurrentGC, isGlobalGC); } return shouldRemember; } diff --git a/runtime/gc_structs/VMThreadStackSlotIterator.cpp b/runtime/gc_structs/VMThreadStackSlotIterator.cpp index 131864bd83f..39322e66b4d 100644 --- a/runtime/gc_structs/VMThreadStackSlotIterator.cpp +++ b/runtime/gc_structs/VMThreadStackSlotIterator.cpp @@ -1,6 +1,6 @@ /******************************************************************************* - * Copyright (c) 1991, 2022 IBM Corp. and others + * Copyright (c) 1991, 2023 IBM Corp. and others * * This program and the accompanying materials are made available under * the terms of the Eclipse Public License 2.0 which accompanies this @@ -137,13 +137,14 @@ GC_VMThreadStackSlotIterator::scanSlots( J9MODRON_OSLOTITERATOR *oSlotIterator, bool includeStackFrameClassReferences, bool trackVisibleFrameDepth, - bool syncWithContinuationMounting + bool isConcurrentGC, + bool isGlobalGC ) { J9StackWalkState stackWalkState; initializeStackWalkState(&stackWalkState, vmThread, userData, oSlotIterator, includeStackFrameClassReferences, trackVisibleFrameDepth); - VM_VMHelpers::walkContinuationStackFramesWrapper(vmThread, continuationObjectPtr, &stackWalkState, syncWithContinuationMounting); + VM_VMHelpers::walkContinuationStackFramesWrapper(vmThread, continuationObjectPtr, &stackWalkState, isConcurrentGC, isGlobalGC); } #if JAVA_SPEC_VERSION >= 19 diff --git a/runtime/gc_structs/VMThreadStackSlotIterator.hpp b/runtime/gc_structs/VMThreadStackSlotIterator.hpp index e07ceddfa88..76830f9f587 100644 --- a/runtime/gc_structs/VMThreadStackSlotIterator.hpp +++ b/runtime/gc_structs/VMThreadStackSlotIterator.hpp @@ -1,6 +1,6 @@ /******************************************************************************* - * Copyright (c) 1991, 2022 IBM Corp. and others + * Copyright (c) 1991, 2023 IBM Corp. 
and others * * This program and the accompanying materials are made available under * the terms of the Eclipse Public License 2.0 which accompanies this @@ -67,7 +67,8 @@ class GC_VMThreadStackSlotIterator J9MODRON_OSLOTITERATOR *oSlotIterator, bool includeStackFrameClassReferences, bool trackVisibleFrameDepth, - bool syncWithContinuationMounting = false); + bool isConcurrentGC, + bool isGlobalGC); #if JAVA_SPEC_VERSION >= 19 static void scanSlots( diff --git a/runtime/gc_vlhgc/CopyForwardScheme.cpp b/runtime/gc_vlhgc/CopyForwardScheme.cpp index 760a27c6b23..40d8fcef3d1 100644 --- a/runtime/gc_vlhgc/CopyForwardScheme.cpp +++ b/runtime/gc_vlhgc/CopyForwardScheme.cpp @@ -2322,7 +2322,8 @@ MMINLINE void MM_CopyForwardScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, MM_AllocationContextTarok *reservingContext, J9Object *objectPtr, ScanReason reason) { J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread(); - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = false; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4CopyForward localData; localData.copyForwardScheme = this; localData.env = env; @@ -2332,7 +2333,9 @@ MM_CopyForwardScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, MM_A #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING) stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled(); #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */ - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCopyForwardScheme, stackFrameClassWalkNeeded, false); + const bool isConcurrentGC = false; + + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForCopyForwardScheme, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC); } } diff --git a/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp b/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp index f30d67aaf52..4004f75a8da 100644 --- a/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp +++ b/runtime/gc_vlhgc/GlobalMarkCardScrubber.cpp @@ -192,14 +192,16 @@ bool MM_GlobalMarkCardScrubber::scrubContinuationNativeSlots(MM_EnvironmentVLHGC { bool doScrub = true; J9VMThread *currentThread = (J9VMThread *)env->getLanguageVMThread(); - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = true; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4GlobalMarkCardScrubber localData; localData.globalMarkCardScrubber = this; localData.env = env; localData.doScrub = &doScrub; localData.fromObject = objectPtr; + const bool isConcurrentGC = false; - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkCardScrubber, false, false); + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkCardScrubber, false, false, isConcurrentGC, isGlobalGC); } return doScrub; } diff --git a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp index 532c258e825..765c2f7d9e1 100644 --- a/runtime/gc_vlhgc/GlobalMarkingScheme.cpp +++ b/runtime/gc_vlhgc/GlobalMarkingScheme.cpp @@ -792,7 +792,8 @@ void MM_GlobalMarkingScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, J9Object *objectPtr, ScanReason reason) { J9VMThread *currentThread = (J9VMThread 
*)env->getLanguageVMThread(); - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = true; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4GlobalMarkingScheme localData; localData.globalMarkingScheme = this; localData.env = env; @@ -801,11 +802,10 @@ MM_GlobalMarkingScheme::scanContinuationNativeSlots(MM_EnvironmentVLHGC *env, J9 #if defined(J9VM_GC_DYNAMIC_CLASS_UNLOADING) stackFrameClassWalkNeeded = isDynamicClassUnloadingEnabled(); #endif /* J9VM_GC_DYNAMIC_CLASS_UNLOADING */ - /* In STW GC there are no racing carrier threads doing mount and no need for the synchronization. */ - bool syncWithContinuationMounting = (MM_VLHGCIncrementStats::mark_concurrent == static_cast<MM_CycleStateVLHGC *>(env->_cycleState)->_vlhgcIncrementStats._globalMarkIncrementType); + bool isConcurrentGC = (MM_VLHGCIncrementStats::mark_concurrent == static_cast<MM_CycleStateVLHGC *>(env->_cycleState)->_vlhgcIncrementStats._globalMarkIncrementType); - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkingScheme, stackFrameClassWalkNeeded, false, syncWithContinuationMounting); + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForGlobalMarkingScheme, stackFrameClassWalkNeeded, false, isConcurrentGC, isGlobalGC); } } diff --git a/runtime/gc_vlhgc/WriteOnceCompactor.cpp b/runtime/gc_vlhgc/WriteOnceCompactor.cpp index 2dd22f27654..013d2a2b2cb 100644 --- a/runtime/gc_vlhgc/WriteOnceCompactor.cpp +++ b/runtime/gc_vlhgc/WriteOnceCompactor.cpp @@ -1240,13 +1240,15 @@ MM_WriteOnceCompactor::fixupContinuationNativeSlots(MM_EnvironmentVLHGC* env, J9 * mounted Virtual threads later during root fixup, we will skip it during this heap fixup pass * (hence passing true for scanOnlyUnmounted parameter). */ - if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr)) { + const bool isGlobalGC = MM_CycleState::CT_GLOBAL_GARBAGE_COLLECTION == env->_cycleState->_collectionType; + if (MM_GCExtensions::needScanStacksForContinuationObject(currentThread, objectPtr, isGlobalGC)) { StackIteratorData4WriteOnceCompactor localData; localData.writeOnceCompactor = this; localData.env = env; localData.fromObject = objectPtr; + const bool isConcurrentGC = false; - GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForWriteOnceCompactor, false, false); + GC_VMThreadStackSlotIterator::scanSlots(currentThread, objectPtr, (void *)&localData, stackSlotIteratorForWriteOnceCompactor, false, false, isConcurrentGC, isGlobalGC); } } diff --git a/runtime/oti/VMHelpers.hpp b/runtime/oti/VMHelpers.hpp index 8009cf4ae39..f6e2ec085c5 100644 --- a/runtime/oti/VMHelpers.hpp +++ b/runtime/oti/VMHelpers.hpp @@ -2050,16 +2050,33 @@ class VM_VMHelpers } #if JAVA_SPEC_VERSION >= 19 - static VMINLINE J9VMThread * - getCarrierThreadFromContinuationState(uintptr_t continuationState) + static VMINLINE uintptr_t getConcurrentGCMask(bool isGlobalGC) { - return (J9VMThread *)(continuationState & (~(uintptr_t)J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN)); + if (isGlobalGC) { + return J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL; + } else { + return J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL; + } } + /** + * Check from the continuation state whether the related J9VMContinuation is being concurrently scanned. + * There are two variants, with and without the bool isGlobalGC parameter. 
+ * Without isGlobalGC, return true if either a local or a global concurrent scan is in progress. + * With isGlobalGC, if isGlobalGC is true, check only for a global concurrent scan; + * if isGlobalGC is false, check only for a local concurrent scan. + */ static VMINLINE bool isConcurrentlyScannedFromContinuationState(uintptr_t continuationState) { - return J9_ARE_ANY_BITS_SET(continuationState, J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN); + return J9_ARE_ANY_BITS_SET(continuationState, J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY); + } + + static VMINLINE bool + isConcurrentlyScannedFromContinuationState(uintptr_t continuationState, bool isGlobalGC) + { + uintptr_t concurrentGCMask = getConcurrentGCMask(isGlobalGC); + return J9_ARE_ANY_BITS_SET(continuationState, concurrentGCMask); } /** @@ -2070,57 +2087,86 @@ class VM_VMHelpers static VMINLINE bool isContinuationMounted(J9VMContinuation *continuation) { - return J9_ARE_ANY_BITS_SET(continuation->state, ~(uintptr_t)J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN); + return J9_ARE_ANY_BITS_SET(continuation->state, ~(uintptr_t)J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY); + } + + static VMINLINE J9VMThread * + getCarrierThreadFromContinuationState(uintptr_t continuationState) + { + return (J9VMThread *)(continuationState & (~(uintptr_t)J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY)); } static VMINLINE bool - isContinuationMountedOrConcurrentlyScanned(J9VMContinuation *continuation) + isContinuationMountedOrConcurrentlyScanned(J9VMContinuation *continuation, bool isGlobalGC) { - return isContinuationMounted(continuation) || isConcurrentlyScannedFromContinuationState(continuation->state); + return isContinuationMounted(continuation) || isConcurrentlyScannedFromContinuationState(continuation->state, isGlobalGC); } /* - * If low tagging failed due to either + * + * @param[in] isGlobalGC selects the scan bit: bit1 (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL) if true, bit0 (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL) if false + * + * On a win, tryWinningConcurrentGCScan sets bit0 (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL) or bit1 (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL) in the state, based on isGlobalGC. + * If the low tagging (of bit0 or bit1) failed due to either * * a carrier thread winning to mount, we don't need to do anything, since it will be compensated by pre/post mount actions - * another GC thread winning to scan, again don't do anything, and let the winning thread do the work, instead + * another GC thread of the same GC type winning to scan (bit0 and bit1 are independent and do not affect each other), again don't do anything, and let the winning thread do the work instead */ static VMINLINE bool - tryWinningConcurrentGCScan(J9VMContinuation *continuation) + tryWinningConcurrentGCScan(J9VMContinuation *continuation, bool isGlobalGC) { - return J9_GC_CONTINUATION_STATE_INITIAL == VM_AtomicSupport::lockCompareExchange(&continuation->state, J9_GC_CONTINUATION_STATE_INITIAL, J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN); + uintptr_t complementGCConcurrentState = J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE; + uintptr_t returnedState = J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE; + do { + /* preserve the concurrent GC state for the other type of GC */ + complementGCConcurrentState = continuation->state & getConcurrentGCMask(!isGlobalGC); + returnedState = VM_AtomicSupport::lockCompareExchange(&continuation->state, complementGCConcurrentState, complementGCConcurrentState | getConcurrentGCMask(isGlobalGC)); + /* if the other GC happened to change its 
concurrent GC state since we took a snapshot of it, we'll have to retry */ } while (complementGCConcurrentState != (returnedState & complementGCConcurrentState)); + + /* if the returned state contains neither a carrier thread ID nor our scan bit, we won */ + return (complementGCConcurrentState == returnedState); } + /** + * Clear the CONCURRENTSCANNING flag selected by isGlobalGC (bit0 for local concurrent scanning, bit1 for global concurrent scanning); + * if both CONCURRENTSCANNING bits (bit0 and bit1) are then clear and a continuation mount is blocked on the concurrent scan, notify the waiting carrier thread. + * @param[in] isGlobalGC true to clear J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL, false to clear J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL + */ static VMINLINE void - exitConcurrentGCScan(J9VMContinuation *continuation) - { - /* clear CONCURRENTSCANNING flag */ - uintptr_t oldContinuationState = VM_AtomicSupport::bitAnd(&continuation->state, ~(uintptr_t)J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN); - J9VMThread *carrierThread = getCarrierThreadFromContinuationState(oldContinuationState); - if (NULL != carrierThread) { - omrthread_monitor_enter(carrierThread->publicFlagsMutex); - /* notify the waiting carrierThread that we just finished scanning, and it can proceed with mounting. */ - omrthread_monitor_notify_all(carrierThread->publicFlagsMutex); - omrthread_monitor_exit(carrierThread->publicFlagsMutex); + exitConcurrentGCScan(J9VMContinuation *continuation, bool isGlobalGC) + { + /* clear the CONCURRENTSCANNING flag, bit0: local concurrent scanning, bit1: global concurrent scanning */ + uintptr_t oldContinuationState = VM_AtomicSupport::bitAnd(&continuation->state, ~getConcurrentGCMask(isGlobalGC)); + uintptr_t complementGCConcurrentState = oldContinuationState & getConcurrentGCMask(!isGlobalGC); + if (!complementGCConcurrentState) { + J9VMThread *carrierThread = getCarrierThreadFromContinuationState(oldContinuationState); + if (NULL != carrierThread) { + omrthread_monitor_enter(carrierThread->publicFlagsMutex); + /* notify the waiting carrierThread that we just finished scanning and we were the only/last GC to scan it, so that it can proceed with mounting. 
*/ + omrthread_monitor_notify_all(carrierThread->publicFlagsMutex); + omrthread_monitor_exit(carrierThread->publicFlagsMutex); + } } } + #endif /* JAVA_SPEC_VERSION >= 19 */ static VMINLINE UDATA - walkContinuationStackFramesWrapper(J9VMThread *vmThread, j9object_t continuationObject, J9StackWalkState *walkState, bool syncWithContinuationMounting) + walkContinuationStackFramesWrapper(J9VMThread *vmThread, j9object_t continuationObject, J9StackWalkState *walkState, bool isConcurrentGC, bool isGlobalGC) { UDATA rc = J9_STACKWALK_RC_NONE; #if JAVA_SPEC_VERSION >= 19 J9VMContinuation *continuation = J9VMJDKINTERNALVMCONTINUATION_VMREF(vmThread, continuationObject); - if (syncWithContinuationMounting && (NULL != continuation)) { - if (!tryWinningConcurrentGCScan(continuation)) { - /* If continuation is mounted or already being scanned by another GC thread, we do nothing */ + if (isConcurrentGC && (NULL != continuation)) { + if (!tryWinningConcurrentGCScan(continuation, isGlobalGC)) { + /* if continuation is mounted or already being scanned by another GC thread of the same GC type, we do nothing */ return rc; } } rc = vmThread->javaVM->internalVMFunctions->walkContinuationStackFrames(vmThread, continuation, walkState); - if (syncWithContinuationMounting && (NULL != continuation)) { - exitConcurrentGCScan(continuation); + if (isConcurrentGC && (NULL != continuation)) { + exitConcurrentGCScan(continuation, isGlobalGC); } #endif /* JAVA_SPEC_VERSION >= 19 */ return rc; diff --git a/runtime/oti/j9consts.h b/runtime/oti/j9consts.h index 636567d5296..68745425bb9 100644 --- a/runtime/oti/j9consts.h +++ b/runtime/oti/j9consts.h @@ -492,8 +492,10 @@ extern "C" { #define J9_GC_MARK_MAP_LOG_SIZEOF_UDATA 0x5 #define J9_GC_MARK_MAP_UDATA_MASK 0x1F #endif /* J9VM_ENV_DATA64 */ -#define J9_GC_CONTINUATION_STATE_INITIAL 0 -#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN 0x1 +#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE 0 +#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL 0x1 +#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL 0x2 +#define J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_ANY (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL | J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL) #define J9VMGC_SIZECLASSES_MIN 0x1 #define J9VMGC_SIZECLASSES_MIN_SMALL 0x1 diff --git a/runtime/oti/j9nonbuilder.h b/runtime/oti/j9nonbuilder.h index 3e14b78b5bc..5eff8b5f28e 100644 --- a/runtime/oti/j9nonbuilder.h +++ b/runtime/oti/j9nonbuilder.h @@ -5033,7 +5033,7 @@ typedef struct J9VMContinuation { struct J9JITGPRSpillArea jitGPRs; struct J9I2JState i2jState; struct J9VMEntryLocalStorage* oldEntryLocalStorage; - volatile UDATA state; /* it's a bit-wise struct of CarrierThread ID and ConcurrentlyScanned flag */ + volatile UDATA state; /* a bit-wise struct of the carrier thread ID and the concurrently-scanned flags; bit0: local concurrent scan (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_LOCAL), bit1: global concurrent scan (J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_GLOBAL) */ UDATA dropFlags; } J9VMContinuation; #endif /* JAVA_SPEC_VERSION >= 19 */ diff --git a/runtime/vm/ContinuationHelpers.cpp b/runtime/vm/ContinuationHelpers.cpp index a0bb1bf0a87..7ae1d279e5d 100644 --- a/runtime/vm/ContinuationHelpers.cpp +++ b/runtime/vm/ContinuationHelpers.cpp @@ -198,7 +198,7 @@ yieldContinuation(J9VMThread *currentThread) * must be maintained for weakly ordered CPUs, to unsure that once the continuation is again available for GC scan (on potentially remote CPUs), all CPUs see up-to-date stack . 
*/ Assert_VM_true((uintptr_t)currentThread == continuation->state); - continuation->state = J9_GC_CONTINUATION_STATE_INITIAL; + continuation->state = J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE; return result; }
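
The two-bit handshake this patch introduces in VMHelpers.hpp is easier to follow outside the diff. Below is a minimal, self-contained C++ sketch of the same protocol, offered as illustration only: the names (ModelContinuation, scanMask, tryWinScan, exitScan) are invented, std::atomic stands in for VM_AtomicSupport::lockCompareExchange/bitAnd, and the carrier-thread wakeup through publicFlagsMutex is reduced to a boolean result.

#include <atomic>
#include <cstdint>

/* Bit layout mirrors j9consts.h above. */
constexpr uintptr_t SCAN_NONE = 0x0;   /* J9_GC_CONTINUATION_STATE_CONCURRENT_SCAN_NONE */
constexpr uintptr_t SCAN_LOCAL = 0x1;  /* bit0, local collector (e.g. concurrent scavenger) */
constexpr uintptr_t SCAN_GLOBAL = 0x2; /* bit1, global collector (e.g. concurrent mark) */
constexpr uintptr_t SCAN_ANY = SCAN_LOCAL | SCAN_GLOBAL;

struct ModelContinuation {
	/* low two bits: concurrent-scan flags; any higher bits: carrier thread pointer (mounted) */
	std::atomic<uintptr_t> state{SCAN_NONE};
};

static uintptr_t scanMask(bool isGlobalGC) { return isGlobalGC ? SCAN_GLOBAL : SCAN_LOCAL; }

/* Analogue of tryWinningConcurrentGCScan: claim the scan bit for one collector type
 * while preserving the other collector's bit. Fails if the continuation is mounted
 * or our bit is already set; retries only if the other collector's bit flipped
 * between the snapshot and the CAS.
 */
bool tryWinScan(ModelContinuation *c, bool isGlobalGC)
{
	for (;;) {
		uintptr_t other = c->state.load() & scanMask(!isGlobalGC);
		uintptr_t expected = other; /* success requires no carrier bits and our bit clear */
		if (c->state.compare_exchange_strong(expected, other | scanMask(isGlobalGC))) {
			return true; /* we are the scanner for this collector type */
		}
		/* expected now holds the actual state; a carrier ID or our own bit means we lost */
		if ((0 != (expected & ~SCAN_ANY)) || (0 != (expected & scanMask(isGlobalGC)))) {
			return false;
		}
		/* otherwise only the other collector's bit changed under us: retry */
	}
}

/* Analogue of exitConcurrentGCScan: drop our bit. The caller would notify a carrier
 * thread blocked on mounting only when this returns true (no scanner of either type left).
 */
bool exitScan(ModelContinuation *c, bool isGlobalGC)
{
	uintptr_t old = c->state.fetch_and(~scanMask(isGlobalGC));
	return 0 == (old & scanMask(!isGlobalGC));
}

The sketch makes the design point explicit: mounting writes the full carrier thread pointer into state (see the Assert_VM_true in yieldContinuation above), so any bit outside SCAN_ANY means "mounted" and a scanner can only win while those bits are zero; and because the CAS loop preserves the other collector's bit, a concurrent scavenge and a concurrent global mark can scan the same continuation independently without blocking each other.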