From ecd7b42db2392bee35038ee41567186230a7f6df Mon Sep 17 00:00:00 2001 From: Salman Rana Date: Tue, 25 Apr 2023 02:37:29 -0400 Subject: [PATCH] GC Scheduler Cleanup - General formatting clean up for Scheduler - Follow up to https://github.com/eclipse-openj9/openj9/pull/17229: revert intermediate changes (run method) and move recomputeActiveThreadCountForTask to prepareThreadsForTask Signed-off-by: Salman Rana --- runtime/gc_realtime/Scheduler.cpp | 106 +++++++++++++++--------------- runtime/gc_realtime/Scheduler.hpp | 64 +++++++++--------- 2 files changed, 84 insertions(+), 86 deletions(-) diff --git a/runtime/gc_realtime/Scheduler.cpp b/runtime/gc_realtime/Scheduler.cpp index 5cf5f761f79..cc8f1a1000c 100644 --- a/runtime/gc_realtime/Scheduler.cpp +++ b/runtime/gc_realtime/Scheduler.cpp @@ -50,10 +50,10 @@ * @ingroup GC_Metronome methodGroup */ MM_Scheduler* -MM_Scheduler::newInstance(MM_EnvironmentBase *env, omrsig_handler_fn handler, void* handler_arg, uintptr_t defaultOSStackSize) +MM_Scheduler::newInstance(MM_EnvironmentBase *env, omrsig_handler_fn handler, void *handler_arg, uintptr_t defaultOSStackSize) { MM_Scheduler *scheduler = (MM_Scheduler *)env->getForge()->allocate(sizeof(MM_Scheduler), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE()); - if (scheduler) { + if (NULL != scheduler) { new(scheduler) MM_Scheduler(env, handler, handler_arg, defaultOSStackSize); if (!scheduler->initialize(env)) { scheduler->kill(env); @@ -82,21 +82,26 @@ MM_Scheduler::kill(MM_EnvironmentBase *env) void MM_Scheduler::tearDown(MM_EnvironmentBase *env) { - if (_mainThreadMonitor) { + if (NULL != _mainThreadMonitor) { omrthread_monitor_destroy(_mainThreadMonitor); + _mainThreadMonitor = NULL; } + if (NULL != _threadResumedTable) { env->getForge()->free(_threadResumedTable); _threadResumedTable = NULL; } + if (NULL != _utilTracker) { _utilTracker->kill(env); + _utilTracker = NULL; } + MM_ParallelDispatcher::kill(env); } uintptr_t -MM_Scheduler::getParameter(uintptr_t which, char 
*keyBuffer, I_32 keyBufferSize, char *valueBuffer, I_32 valueBufferSize) +MM_Scheduler::getParameter(uintptr_t which, char *keyBuffer, int32_t keyBufferSize, char *valueBuffer, int32_t valueBufferSize) { OMRPORT_ACCESS_FROM_OMRVM(_vm); switch (which) { @@ -106,8 +111,8 @@ MM_Scheduler::getParameter(uintptr_t which, char *keyBuffer, I_32 keyBufferSize, case 1: { omrstr_printf(keyBuffer, keyBufferSize, "Scheduling Method"); - I_32 len = (I_32)omrstr_printf(valueBuffer, valueBufferSize, "TIME_BASED with "); - while (_alarmThread == NULL || _alarmThread->_alarm == NULL) { + int32_t len = (int32_t)omrstr_printf(valueBuffer, valueBufferSize, "TIME_BASED with "); + while ((NULL == _alarmThread) || (NULL == _alarmThread->_alarm)) { /* Wait for GC to finish initializing */ omrthread_sleep(100); } @@ -155,11 +160,15 @@ MM_Scheduler::showParameters(MM_EnvironmentBase *env) { OMRPORT_ACCESS_FROM_ENVIRONMENT(env); omrtty_printf("****************************************************************************\n"); - for (uintptr_t which=0; ; which++) { + for (uintptr_t which = 0; ; which++) { char keyBuffer[256], valBuffer[256]; uintptr_t rc = getParameter(which, keyBuffer, sizeof(keyBuffer), valBuffer, sizeof(valBuffer)); - if (rc == 0) { break; } - if (rc == 1) { omrtty_printf("%s: %s\n", keyBuffer, valBuffer); } + if (rc == 0) { + break; + } + if (rc == 1) { + omrtty_printf("%s: %s\n", keyBuffer, valBuffer); + } } omrtty_printf("****************************************************************************\n"); } @@ -222,13 +231,13 @@ MM_Scheduler::initialize(MM_EnvironmentBase *env) if (METRONOME_DEFAULT_BEAT_MICRO < _extensions->beatMicro) { uintptr_t intervalToSkipYieldCheckMicro = _extensions->beatMicro - METRONOME_DEFAULT_BEAT_MICRO; uintptr_t maxInterYieldTimeMicro = INTER_YIELD_MAX_NS / 1000; - _extensions->distanceToYieldTimeCheck = (U_32)(intervalToSkipYieldCheckMicro / maxInterYieldTimeMicro); + _extensions->distanceToYieldTimeCheck = 
(uint32_t)(intervalToSkipYieldCheckMicro / maxInterYieldTimeMicro); } /* Show GC parameters here before we enter real execution */ _window = _extensions->timeWindowMicro / 1e6; _beat = _extensions->beatMicro / 1e6; - _beatNanos = (U_64) (_extensions->beatMicro * 1e3); + _beatNanos = (uint64_t) (_extensions->beatMicro * 1e3); _staticTargetUtilization = _extensions->targetUtilizationPercentage / 1e2; _utilTracker = MM_UtilizationTracker::newInstance(env, _window, _beatNanos, _staticTargetUtilization); if (NULL == _utilTracker) { @@ -327,13 +336,13 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t * Make sure _completeCurrentGCSynchronously and _mode are atomically changed. */ omrthread_monitor_enter(_mainThreadMonitor); - switch(reason) { + switch (reason) { case OUT_OF_MEMORY_TRIGGER: /* For now we assume that OUT_OF_MEMORY trigger means perform * a synchronous GC, but maybe we want a mode where we try one * more time slice before degrading to synchronous. */ - if(!_extensions->synchronousGCOnOOM) { + if (!_extensions->synchronousGCOnOOM) { break; } /* fall through */ @@ -347,7 +356,7 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t break; default: /* WORK_TRIGGER or TIME_TRIGGER */ { - if(_threadWaitingOnMainThreadMonitor != NULL) { + if (NULL != _threadWaitingOnMainThreadMonitor) { /* Check your timer again incase another thread beat you to checking for shouldMutatorDoubleBeat */ if (env->getTimer()->hasTimeElapsed(getStartTimeOfCurrentMutatorSlice(), _beatNanos)) { if (shouldMutatorDoubleBeat(_threadWaitingOnMainThreadMonitor, env->getTimer())) { @@ -367,7 +376,7 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t break; } } - if(_threadWaitingOnMainThreadMonitor == NULL) { + if (NULL == _threadWaitingOnMainThreadMonitor) { /* * The gc thread(s) are already awake and collecting (otherwise, the main * gc thread would be waiting on the monitor). 
@@ -423,7 +432,7 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t uintptr_t MM_Scheduler::getTaskThreadCount(MM_EnvironmentBase *env) { - if (env->_currentTask == NULL) { + if (NULL == env->_currentTask) { return 1; } return env->_currentTask->getThreadCount(); @@ -436,7 +445,7 @@ MM_Scheduler::waitForMutatorsToStop(MM_EnvironmentRealtime *env) OMRPORT_ACCESS_FROM_ENVIRONMENT(env); /* we need to record how long it took to wait for the mutators to stop */ - U_64 exclusiveAccessTime = omrtime_hires_clock(); + uint64_t exclusiveAccessTime = omrtime_hires_clock(); /* The time before acquiring exclusive VM access is charged to the mutator but the time * during the acquisition is conservatively charged entirely to the GC. */ @@ -451,7 +460,7 @@ MM_Scheduler::waitForMutatorsToStop(MM_EnvironmentRealtime *env) * TODO: This approach is just to fix some timing holes in shutdown. Consider removing this * "if" statement and fix alarm thread not to die before requesting exclusive access for us. 
*/ - if (_mainThreadMustShutDown && _mode != WAKING_GC) { + if (_mainThreadMustShutDown && (WAKING_GC != _mode)) { uintptr_t gcPriority = 0; _gc->getRealtimeDelegate()->requestExclusiveVMAccess(env, TRUE /* block */, &gcPriority); _gc->setGCThreadPriority(env->getOmrVMThread(), gcPriority); @@ -496,7 +505,7 @@ MM_Scheduler::shouldGCDoubleBeat(MM_EnvironmentRealtime *env) if (targetUtilization <= 0.0) { return true; } - I_32 maximumAllowedConsecutiveBeats = (I_32) (1.0 / targetUtilization); + int32_t maximumAllowedConsecutiveBeats = (int32_t) (1.0 / targetUtilization); if (_currentConsecutiveBeats >= maximumAllowedConsecutiveBeats) { return false; } @@ -526,10 +535,10 @@ MM_Scheduler::reportStartGCIncrement(MM_EnvironmentRealtime *env) { OMRPORT_ACCESS_FROM_ENVIRONMENT(env); - if(_completeCurrentGCSynchronously) { + if (_completeCurrentGCSynchronously) { _completeCurrentGCSynchronouslyMainThreadCopy = true; - U_64 exclusiveAccessTimeMicros = 0; - U_64 meanExclusiveAccessIdleTimeMicros = 0; + uint64_t exclusiveAccessTimeMicros = 0; + uint64_t meanExclusiveAccessIdleTimeMicros = 0; Trc_MM_SystemGCStart(env->getLanguageVMThread(), _extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_NEW), @@ -543,10 +552,10 @@ MM_Scheduler::reportStartGCIncrement(MM_EnvironmentRealtime *env) exclusiveAccessTimeMicros = omrtime_hires_delta(0, env->getExclusiveAccessTime(), OMRPORT_TIME_DELTA_IN_MICROSECONDS); meanExclusiveAccessIdleTimeMicros = omrtime_hires_delta(0, env->getMeanExclusiveAccessIdleTime(), OMRPORT_TIME_DELTA_IN_MICROSECONDS); Trc_MM_ExclusiveAccess(env->getLanguageVMThread(), - (U_32)(exclusiveAccessTimeMicros / 1000), - (U_32)(exclusiveAccessTimeMicros % 1000), - (U_32)(meanExclusiveAccessIdleTimeMicros / 1000), - (U_32)(meanExclusiveAccessIdleTimeMicros % 1000), + (uint32_t)(exclusiveAccessTimeMicros / 1000), + (uint32_t)(exclusiveAccessTimeMicros % 1000), + (uint32_t)(meanExclusiveAccessIdleTimeMicros / 1000), + 
(uint32_t)(meanExclusiveAccessIdleTimeMicros % 1000), env->getExclusiveAccessHaltedThreads(), env->getLastExclusiveAccessResponder(), env->exclusiveAccessBeatenByOtherThread()); @@ -637,7 +646,7 @@ MM_Scheduler::restartMutatorsAndWait(MM_EnvironmentRealtime *env) } bool -MM_Scheduler::shouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack) +MM_Scheduler::shouldGCYield(MM_EnvironmentRealtime *env, uint64_t timeSlack) { return internalShouldGCYield(env, timeSlack); } @@ -652,14 +661,14 @@ MM_Scheduler::shouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack) * @return true if the GC thread should yield, false otherwise */ MMINLINE bool -MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack) +MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, uint64_t timeSlack) { if (_completeCurrentGCSynchronouslyMainThreadCopy) { /* If we have degraded to a synchronous GC, don't yield until finished */ return false; } /* Be harmless when called indirectly on mutator thread */ - if (env->getThreadType() == MUTATOR_THREAD) { + if (MUTATOR_THREAD == env->getThreadType()) { return false; } /* The GC does not have to yield when ConcurrentTracing or ConcurrentSweeping is @@ -679,9 +688,9 @@ MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack) return false; } - I_64 nanosLeft = _utilTracker->getNanosLeft(env, getStartTimeOfCurrentGCSlice()); + int64_t nanosLeft = _utilTracker->getNanosLeft(env, getStartTimeOfCurrentGCSlice()); if (nanosLeft > 0) { - if ((U_64)nanosLeft > timeSlack) { + if ((uint64_t)nanosLeft > timeSlack) { return false; } } @@ -690,7 +699,7 @@ MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack) } bool -MM_Scheduler::condYieldFromGCWrapper(MM_EnvironmentBase *env, U_64 timeSlack) +MM_Scheduler::condYieldFromGCWrapper(MM_EnvironmentBase *env, uint64_t timeSlack) { return condYieldFromGC(env, timeSlack); } @@ -706,7 +715,7 @@ 
MM_Scheduler::condYieldFromGCWrapper(MM_EnvironmentBase *env, U_64 timeSlack) * @return true if yielding actually occurred, false otherwise */ bool -MM_Scheduler::condYieldFromGC(MM_EnvironmentBase *envBase, U_64 timeSlack) +MM_Scheduler::condYieldFromGC(MM_EnvironmentBase *envBase, uint64_t timeSlack) { MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(envBase); @@ -762,33 +771,23 @@ void MM_Scheduler::yieldFromGC(MM_EnvironmentRealtime *env, bool distanceChecked } } -void -MM_Scheduler::run(MM_EnvironmentBase *env, MM_Task *task, uintptr_t newThreadCount) -{ - uintptr_t activeThreads = recomputeActiveThreadCountForTask(env, task, newThreadCount); - task->mainSetup(env); - prepareThreadsForTask(env, task, activeThreads); - acceptTask(env); - task->run(env); - completeTask(env); - cleanupAfterTask(env); - task->mainCleanup(env); -} - void MM_Scheduler::prepareThreadsForTask(MM_EnvironmentBase *env, MM_Task *task, uintptr_t threadCount) { omrthread_monitor_enter(_workerThreadMutex); _workerThreadsReservedForGC = true; + uintptr_t activeThreads = recomputeActiveThreadCountForTask(env, task, threadCount); + + task->mainSetup(env); task->setSynchronizeMutex(_synchronizeMutex); - for (uintptr_t index=0; index < threadCount; index++) { + for (uintptr_t index = 0; index < activeThreads; index++) { _statusTable[index] = worker_status_reserved; _taskTable[index] = task; } - wakeUpThreads(threadCount); + wakeUpThreads(activeThreads); omrthread_monitor_exit(_workerThreadMutex); pushYieldCollaborator(((MM_IncrementalParallelTask *)task)->getYieldCollaborator()); @@ -829,7 +828,7 @@ MM_Scheduler::startUpThreads() /* Now that the GC threads are started, it is safe to start the alarm thread */ _alarmThread = MM_MetronomeAlarmThread::newInstance(&env); - if (_alarmThread == NULL) { + if (NULL == _alarmThread) { omrtty_printf("Unable to initialize alarm thread for time-based GC scheduling\n"); omrtty_printf("Most likely cause is non-supported version of OS\n"); 
return false; @@ -877,13 +876,13 @@ MM_Scheduler::workerEntryPoint(MM_EnvironmentBase *envModron) omrthread_monitor_enter(_workerThreadMutex); - while(worker_status_dying != _statusTable[workerID]) { + while (worker_status_dying != _statusTable[workerID]) { /* Wait for a task to be dispatched to the worker thread */ - while(worker_status_waiting == _statusTable[workerID]) { + while (worker_status_waiting == _statusTable[workerID]) { omrthread_monitor_wait(_workerThreadMutex); } - if(worker_status_reserved == _statusTable[workerID]) { + if (worker_status_reserved == _statusTable[workerID]) { /* Found a task to dispatch to - do prep work for dispatch */ acceptTask(env); omrthread_monitor_exit(_workerThreadMutex); @@ -895,6 +894,7 @@ MM_Scheduler::workerEntryPoint(MM_EnvironmentBase *envModron) completeTask(env); } } + omrthread_monitor_exit(_workerThreadMutex); } @@ -1113,7 +1113,7 @@ MM_Scheduler::incrementMutatorCount() extern "C" { void -j9gc_startGCIfTimeExpired(OMR_VMThread* vmThread) +j9gc_startGCIfTimeExpired(OMR_VMThread *vmThread) { MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(vmThread); MM_Scheduler *scheduler = (MM_Scheduler *)env->getExtensions()->dispatcher; diff --git a/runtime/gc_realtime/Scheduler.hpp b/runtime/gc_realtime/Scheduler.hpp index 4c4373b1f8e..7a5feafe3f9 100644 --- a/runtime/gc_realtime/Scheduler.hpp +++ b/runtime/gc_realtime/Scheduler.hpp @@ -61,8 +61,8 @@ class MM_Scheduler : public MM_ParallelDispatcher * Data members */ private: - U_64 _mutatorStartTimeInNanos; /**< Time in nanoseconds when the mutator slice started. This is updated at increment end and when a GC quantum is skipped due to shouldMutatorDoubleBeat */ - U_64 _incrementStartTimeInNanos; /**< Time in nanoseconds when the last gc increment started */ + uint64_t _mutatorStartTimeInNanos; /**< Time in nanoseconds when the mutator slice started. 
This is updated at increment end and when a GC quantum is skipped due to shouldMutatorDoubleBeat */ + uint64_t _incrementStartTimeInNanos; /**< Time in nanoseconds when the last gc increment started */ MM_GCCode _gcCode; /**< The gc code that will be used for the next GC cycle. If this is modified during a collect it will be unused. This variable is reset at the end of every cycle to the default collection type */ protected: @@ -72,21 +72,16 @@ class MM_Scheduler : public MM_ParallelDispatcher MM_YieldCollaborator *_yieldCollaborator; - /* - * Cached value for shouldGCYield() - */ - volatile bool _shouldGCYield; + volatile bool _shouldGCYield; /**< Cached value for shouldGCYield() */ /* When utilization is over 50%, multiple consecutive GC beats is not allowed. * Double-beating is allowed between 33% and 50% utilization and so on. * We must keep track of the number of consecutive beats dynamically to ensure this. */ - I_32 _currentConsecutiveBeats; + int32_t _currentConsecutiveBeats; bool *_threadResumedTable; /**< Used to keep track of threads taken out of the suspended state when wakeUpThreads is called */ - bool _mainThreadMustShutDown; /**< Set when the main thread must shutdown */ - bool _exclusiveVMAccessRequired; /**< This flag is used by the main thread to see if it needs to get exclusive vm access */ MM_MetronomeAlarmThread *_alarmThread; @@ -98,7 +93,8 @@ class MM_Scheduler : public MM_ParallelDispatcher MM_GCExtensionsBase *_extensions; bool _doSchedulingBarrierEvents; - U_32 _gcOn; /* Are we in some long GC cycle? */ + uint32_t _gcOn; /**< Are we in some long GC cycle? 
*/ + typedef enum { MUTATOR = 0, /* main blocked on a monitor, mutators running */ WAKING_GC, @@ -108,16 +104,17 @@ class MM_Scheduler : public MM_ParallelDispatcher WAKING_MUTATOR, /* main thread still awake, mutators running */ NUM_MODES } Mode; + Mode _mode; uintptr_t _gcPhaseSet; - /* requests (typically by a mutator) to complete GC synchronously */ - bool _completeCurrentGCSynchronously; - /* copy of the request made by Main Thread at the beginning of very next GC increment */ - bool _completeCurrentGCSynchronouslyMainThreadCopy; + bool _completeCurrentGCSynchronously; /**< Requests (typically by a mutator) to complete GC synchronously */ + bool _completeCurrentGCSynchronouslyMainThreadCopy; /**< Copy of the request made by Main Thread at the beginning of very next GC increment */ GCReason _completeCurrentGCSynchronouslyReason; uintptr_t _completeCurrentGCSynchronouslyReasonParameter; + /* monitor used to suspend/resume main thread, - * but also to ensure atomic access/change of _completeCurrentGCSynchronously/_mode */ + * but also to ensure atomic access/change of _completeCurrentGCSynchronously/_mode + */ omrthread_monitor_t _mainThreadMonitor; MM_OSInterface *_osInterface; @@ -125,7 +122,7 @@ class MM_Scheduler : public MM_ParallelDispatcher /* Params generated from command-line options from mmparse */ double _window; double _beat; - U_64 _beatNanos; + uint64_t _beatNanos; double _staticTargetUtilization; MM_UtilizationTracker* _utilTracker; @@ -157,7 +154,7 @@ class MM_Scheduler : public MM_ParallelDispatcher virtual void workerEntryPoint(MM_EnvironmentBase *env); virtual void mainEntryPoint(MM_EnvironmentBase *env); - bool internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack); + bool internalShouldGCYield(MM_EnvironmentRealtime *env, uint64_t timeSlack); /** @} */ @@ -173,11 +170,11 @@ class MM_Scheduler : public MM_ParallelDispatcher void shutDownMainThread(); void startGCIfTimeExpired(MM_EnvironmentBase *env); - virtual bool 
condYieldFromGCWrapper(MM_EnvironmentBase *env, U_64 timeSlack = 0); + virtual bool condYieldFromGCWrapper(MM_EnvironmentBase *env, uint64_t timeSlack = 0); uintptr_t incrementMutatorCount(); - uintptr_t getParameter(uintptr_t which, char *keyBuffer, I_32 keyBufferSize, char *valueBuffer, I_32 valueBufferSize); + uintptr_t getParameter(uintptr_t which, char *keyBuffer, int32_t keyBufferSize, char *valueBuffer, int32_t valueBufferSize); void showParameters(MM_EnvironmentBase *env); /** @@ -190,7 +187,7 @@ class MM_Scheduler : public MM_ParallelDispatcher bool isInitialized() { return _isInitialized; } - static MM_Scheduler *newInstance(MM_EnvironmentBase *env, omrsig_handler_fn handler, void* handler_arg, uintptr_t defaultOSStackSize); + static MM_Scheduler *newInstance(MM_EnvironmentBase *env, omrsig_handler_fn handler, void *handler_arg, uintptr_t defaultOSStackSize); bool initialize(MM_EnvironmentBase *env); virtual void kill(MM_EnvironmentBase *env); void tearDown(MM_EnvironmentBase *env); @@ -202,7 +199,7 @@ class MM_Scheduler : public MM_ParallelDispatcher virtual void completeTask(MM_EnvironmentBase *env); void checkStartGC(MM_EnvironmentRealtime *env); - void startGC(MM_EnvironmentBase *env); /* Call when some space threshold is triggered. */ + void startGC(MM_EnvironmentBase *env); /**< Call when some space threshold is triggered. 
 */ void stopGC(MM_EnvironmentBase *env); bool isGCOn(); bool shouldGCDoubleBeat(MM_EnvironmentRealtime *env); @@ -210,14 +207,17 @@ class MM_Scheduler : public MM_ParallelDispatcher void reportStartGCIncrement(MM_EnvironmentRealtime *env); void reportStopGCIncrement(MM_EnvironmentRealtime *env, bool isCycleEnd = false); void restartMutatorsAndWait(MM_EnvironmentRealtime *env); - bool shouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack); - bool condYieldFromGC(MM_EnvironmentBase *env, U_64 timeSlack = 0); + bool shouldGCYield(MM_EnvironmentRealtime *env, uint64_t timeSlack); + bool condYieldFromGC(MM_EnvironmentBase *env, uint64_t timeSlack = 0); /* Low-level yielding from inside the Scheduler */ void yieldFromGC(MM_EnvironmentRealtime *env, bool distanceChecked = false); void waitForMutatorsToStop(MM_EnvironmentRealtime *env); void startMutators(MM_EnvironmentRealtime *env); - bool continueGC(MM_EnvironmentRealtime *, GCReason reason, uintptr_t reasonParameter, OMR_VMThread *_vmThread, bool doRequestExclusiveVMAccess); /* Non-blocking and typicallly called by an alarm handler. Returns 1 if we did resume GC (non-recursive). */ - void setGCPriority(MM_EnvironmentBase *env, uintptr_t priority); /* sets the priority for all gc threads */ + + /* Non-blocking and typically called by an alarm handler. Returns 1 if we did resume GC (non-recursive). 
*/ + bool continueGC(MM_EnvironmentRealtime *, GCReason reason, uintptr_t reasonParameter, OMR_VMThread *_vmThread, bool doRequestExclusiveVMAccess); + + void setGCPriority(MM_EnvironmentBase *env, uintptr_t priority); /**< Sets the priority for all gc threads */ void completeCurrentGCSynchronously(MM_EnvironmentRealtime *env = NULL); uintptr_t verbose() { return _extensions->verbose; } @@ -228,14 +228,14 @@ class MM_Scheduler : public MM_ParallelDispatcher /* Time and Work Statistics */ void startGCTime(MM_EnvironmentRealtime *env, bool isDoubleBeat); void stopGCTime(MM_EnvironmentRealtime *env); - U_64 getStartTimeOfCurrentMutatorSlice() {return _mutatorStartTimeInNanos;} - void setStartTimeOfCurrentMutatorSlice(U_64 time) {_mutatorStartTimeInNanos = time;} - U_64 getStartTimeOfCurrentGCSlice() {return _incrementStartTimeInNanos;} - void setStartTimeOfCurrentGCSlice(U_64 time) {_incrementStartTimeInNanos = time;} + uint64_t getStartTimeOfCurrentMutatorSlice() { return _mutatorStartTimeInNanos; } + void setStartTimeOfCurrentMutatorSlice(uint64_t time) { _mutatorStartTimeInNanos = time; } + uint64_t getStartTimeOfCurrentGCSlice() { return _incrementStartTimeInNanos; } + void setStartTimeOfCurrentGCSlice(uint64_t time) { _incrementStartTimeInNanos = time; } uintptr_t getActiveThreadCount() { return _activeThreadCount; } uintptr_t getTaskThreadCount(MM_EnvironmentBase *env); - void setGCCode(MM_GCCode gcCode) {_gcCode = gcCode;} + void setGCCode(MM_GCCode gcCode) { _gcCode = gcCode; } void collectorInitialized(MM_RealtimeGC *gc); @@ -244,9 +244,7 @@ class MM_Scheduler : public MM_ParallelDispatcher virtual void prepareForCheckpoint(MM_EnvironmentBase *env, uintptr_t newThreadCount) {}; #endif /* defined(J9VM_OPT_CRIU_SUPPORT) */ - virtual void run(MM_EnvironmentBase *env, MM_Task *task, uintptr_t threadCount = UDATA_MAX); - - MM_Scheduler(MM_EnvironmentBase *env, omrsig_handler_fn handler, void* handler_arg, uintptr_t defaultOSStackSize) : + 
MM_Scheduler(MM_EnvironmentBase *env, omrsig_handler_fn handler, void *handler_arg, uintptr_t defaultOSStackSize) : MM_ParallelDispatcher(env, handler, handler_arg, defaultOSStackSize), _mutatorStartTimeInNanos(J9CONST64(0)), _incrementStartTimeInNanos(J9CONST64(0)),