Merge pull request #17273 from RSalman/scheduler-follow-up
GC Scheduler Cleanup
amicic committed Apr 28, 2023
2 parents 6eb1d66 + ecd7b42 commit b862c8b
Showing 2 changed files with 84 additions and 86 deletions.
106 changes: 53 additions & 53 deletions runtime/gc_realtime/Scheduler.cpp
@@ -50,10 +50,10 @@
* @ingroup GC_Metronome methodGroup
*/
MM_Scheduler*
MM_Scheduler::newInstance(MM_EnvironmentBase *env, omrsig_handler_fn handler, void* handler_arg, uintptr_t defaultOSStackSize)
MM_Scheduler::newInstance(MM_EnvironmentBase *env, omrsig_handler_fn handler, void *handler_arg, uintptr_t defaultOSStackSize)
{
MM_Scheduler *scheduler = (MM_Scheduler *)env->getForge()->allocate(sizeof(MM_Scheduler), MM_AllocationCategory::FIXED, OMR_GET_CALLSITE());
if (scheduler) {
if (NULL != scheduler) {
new(scheduler) MM_Scheduler(env, handler, handler_arg, defaultOSStackSize);
if (!scheduler->initialize(env)) {
scheduler->kill(env);
@@ -82,21 +82,26 @@ MM_Scheduler::kill(MM_EnvironmentBase *env)
void
MM_Scheduler::tearDown(MM_EnvironmentBase *env)
{
if (_mainThreadMonitor) {
if (NULL != _mainThreadMonitor) {
omrthread_monitor_destroy(_mainThreadMonitor);
_mainThreadMonitor = NULL;
}

if (NULL != _threadResumedTable) {
env->getForge()->free(_threadResumedTable);
_threadResumedTable = NULL;
}

if (NULL != _utilTracker) {
_utilTracker->kill(env);
_utilTracker = NULL;
}

MM_ParallelDispatcher::kill(env);
}

uintptr_t
MM_Scheduler::getParameter(uintptr_t which, char *keyBuffer, I_32 keyBufferSize, char *valueBuffer, I_32 valueBufferSize)
MM_Scheduler::getParameter(uintptr_t which, char *keyBuffer, int32_t keyBufferSize, char *valueBuffer, int32_t valueBufferSize)
{
OMRPORT_ACCESS_FROM_OMRVM(_vm);
switch (which) {
@@ -106,8 +111,8 @@ MM_Scheduler::getParameter(uintptr_t which, char *keyBuffer, I_32 keyBufferSize,
case 1:
{
omrstr_printf(keyBuffer, keyBufferSize, "Scheduling Method");
I_32 len = (I_32)omrstr_printf(valueBuffer, valueBufferSize, "TIME_BASED with ");
while (_alarmThread == NULL || _alarmThread->_alarm == NULL) {
int32_t len = (int32_t)omrstr_printf(valueBuffer, valueBufferSize, "TIME_BASED with ");
while ((NULL == _alarmThread) || (NULL == _alarmThread->_alarm)) {
/* Wait for GC to finish initializing */
omrthread_sleep(100);
}
@@ -155,11 +160,15 @@ MM_Scheduler::showParameters(MM_EnvironmentBase *env)
{
OMRPORT_ACCESS_FROM_ENVIRONMENT(env);
omrtty_printf("****************************************************************************\n");
for (uintptr_t which=0; ; which++) {
for (uintptr_t which = 0; ; which++) {
char keyBuffer[256], valBuffer[256];
uintptr_t rc = getParameter(which, keyBuffer, sizeof(keyBuffer), valBuffer, sizeof(valBuffer));
if (rc == 0) { break; }
if (rc == 1) { omrtty_printf("%s: %s\n", keyBuffer, valBuffer); }
if (rc == 0) {
break;
}
if (rc == 1) {
omrtty_printf("%s: %s\n", keyBuffer, valBuffer);
}
}
omrtty_printf("****************************************************************************\n");
}
@@ -222,13 +231,13 @@ MM_Scheduler::initialize(MM_EnvironmentBase *env)
if (METRONOME_DEFAULT_BEAT_MICRO < _extensions->beatMicro) {
uintptr_t intervalToSkipYieldCheckMicro = _extensions->beatMicro - METRONOME_DEFAULT_BEAT_MICRO;
uintptr_t maxInterYieldTimeMicro = INTER_YIELD_MAX_NS / 1000;
_extensions->distanceToYieldTimeCheck = (U_32)(intervalToSkipYieldCheckMicro / maxInterYieldTimeMicro);
_extensions->distanceToYieldTimeCheck = (uint32_t)(intervalToSkipYieldCheckMicro / maxInterYieldTimeMicro);
}
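/* Worked example (illustration only, not part of the commit): assuming
 * METRONOME_DEFAULT_BEAT_MICRO = 3000 and INTER_YIELD_MAX_NS = 1000000,
 * with a hypothetical beatMicro of 10000:
 *
 *   intervalToSkipYieldCheckMicro = 10000 - 3000   = 7000 (us)
 *   maxInterYieldTimeMicro        = 1000000 / 1000 = 1000 (us)
 *   distanceToYieldTimeCheck      = 7000 / 1000    = 7
 *
 * so roughly seven yield checks can be skipped per beat before a time check
 * is forced. The constant values above are assumptions for the example,
 * not the real Metronome settings.
 */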

/* Show GC parameters here before we enter real execution */
_window = _extensions->timeWindowMicro / 1e6;
_beat = _extensions->beatMicro / 1e6;
_beatNanos = (U_64) (_extensions->beatMicro * 1e3);
_beatNanos = (uint64_t) (_extensions->beatMicro * 1e3);
_staticTargetUtilization = _extensions->targetUtilizationPercentage / 1e2;
_utilTracker = MM_UtilizationTracker::newInstance(env, _window, _beatNanos, _staticTargetUtilization);
if (NULL == _utilTracker) {
@@ -327,13 +336,13 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t
* Make sure _completeCurrentGCSynchronously and _mode are atomically changed.
*/
omrthread_monitor_enter(_mainThreadMonitor);
switch(reason) {
switch (reason) {
case OUT_OF_MEMORY_TRIGGER:
/* For now we assume that OUT_OF_MEMORY trigger means perform
* a synchronous GC, but maybe we want a mode where we try one
* more time slice before degrading to synchronous.
*/
if(!_extensions->synchronousGCOnOOM) {
if (!_extensions->synchronousGCOnOOM) {
break;
}
/* fall through */
@@ -347,7 +356,7 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t

break;
default: /* WORK_TRIGGER or TIME_TRIGGER */ {
if(_threadWaitingOnMainThreadMonitor != NULL) {
if (NULL != _threadWaitingOnMainThreadMonitor) {
/* Check your timer again in case another thread beat you to checking for shouldMutatorDoubleBeat */
if (env->getTimer()->hasTimeElapsed(getStartTimeOfCurrentMutatorSlice(), _beatNanos)) {
if (shouldMutatorDoubleBeat(_threadWaitingOnMainThreadMonitor, env->getTimer())) {
@@ -367,7 +376,7 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t
break;
}
}
if(_threadWaitingOnMainThreadMonitor == NULL) {
if (NULL == _threadWaitingOnMainThreadMonitor) {
/*
* The gc thread(s) are already awake and collecting (otherwise, the main
* gc thread would be waiting on the monitor).
@@ -423,7 +432,7 @@ MM_Scheduler::continueGC(MM_EnvironmentRealtime *env, GCReason reason, uintptr_t
uintptr_t
MM_Scheduler::getTaskThreadCount(MM_EnvironmentBase *env)
{
if (env->_currentTask == NULL) {
if (NULL == env->_currentTask) {
return 1;
}
return env->_currentTask->getThreadCount();
@@ -436,7 +445,7 @@ MM_Scheduler::waitForMutatorsToStop(MM_EnvironmentRealtime *env)
OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

/* we need to record how long it took to wait for the mutators to stop */
U_64 exclusiveAccessTime = omrtime_hires_clock();
uint64_t exclusiveAccessTime = omrtime_hires_clock();

/* The time before acquiring exclusive VM access is charged to the mutator but the time
* during the acquisition is conservatively charged entirely to the GC. */
@@ -451,7 +460,7 @@ MM_Scheduler::waitForMutatorsToStop(MM_EnvironmentRealtime *env)
* TODO: This approach is just to fix some timing holes in shutdown. Consider removing this
* "if" statement and fix alarm thread not to die before requesting exclusive access for us.
*/
if (_mainThreadMustShutDown && _mode != WAKING_GC) {
if (_mainThreadMustShutDown && (WAKING_GC != _mode)) {
uintptr_t gcPriority = 0;
_gc->getRealtimeDelegate()->requestExclusiveVMAccess(env, TRUE /* block */, &gcPriority);
_gc->setGCThreadPriority(env->getOmrVMThread(), gcPriority);
@@ -496,7 +505,7 @@ MM_Scheduler::shouldGCDoubleBeat(MM_EnvironmentRealtime *env)
if (targetUtilization <= 0.0) {
return true;
}
I_32 maximumAllowedConsecutiveBeats = (I_32) (1.0 / targetUtilization);
int32_t maximumAllowedConsecutiveBeats = (int32_t) (1.0 / targetUtilization);
if (_currentConsecutiveBeats >= maximumAllowedConsecutiveBeats) {
return false;
}
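The cap above is simply the floor of the reciprocal of the target utilization. A minimal, self-contained sketch of that arithmetic with illustrative targets (the utilization values are examples, not OMR defaults):

#include <cstdint>
#include <cstdio>

int main()
{
	/* Illustrative target utilizations; not OMR defaults. */
	const double targets[] = { 0.70, 0.50, 0.25 };
	for (double u : targets) {
		/* Same computation as shouldGCDoubleBeat() above. */
		int32_t maximumAllowedConsecutiveBeats = (int32_t)(1.0 / u);
		/* Prints: 0.70 -> 1, 0.50 -> 2, 0.25 -> 4 */
		printf("target %.2f -> %d consecutive beats allowed\n", u, maximumAllowedConsecutiveBeats);
	}
	return 0;
}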
@@ -526,10 +535,10 @@ MM_Scheduler::reportStartGCIncrement(MM_EnvironmentRealtime *env)
{
OMRPORT_ACCESS_FROM_ENVIRONMENT(env);

if(_completeCurrentGCSynchronously) {
if (_completeCurrentGCSynchronously) {
_completeCurrentGCSynchronouslyMainThreadCopy = true;
U_64 exclusiveAccessTimeMicros = 0;
U_64 meanExclusiveAccessIdleTimeMicros = 0;
uint64_t exclusiveAccessTimeMicros = 0;
uint64_t meanExclusiveAccessIdleTimeMicros = 0;

Trc_MM_SystemGCStart(env->getLanguageVMThread(),
_extensions->heap->getApproximateActiveFreeMemorySize(MEMORY_TYPE_NEW),
@@ -543,10 +552,10 @@ MM_Scheduler::reportStartGCIncrement(MM_EnvironmentRealtime *env)
exclusiveAccessTimeMicros = omrtime_hires_delta(0, env->getExclusiveAccessTime(), OMRPORT_TIME_DELTA_IN_MICROSECONDS);
meanExclusiveAccessIdleTimeMicros = omrtime_hires_delta(0, env->getMeanExclusiveAccessIdleTime(), OMRPORT_TIME_DELTA_IN_MICROSECONDS);
Trc_MM_ExclusiveAccess(env->getLanguageVMThread(),
(U_32)(exclusiveAccessTimeMicros / 1000),
(U_32)(exclusiveAccessTimeMicros % 1000),
(U_32)(meanExclusiveAccessIdleTimeMicros / 1000),
(U_32)(meanExclusiveAccessIdleTimeMicros % 1000),
(uint32_t)(exclusiveAccessTimeMicros / 1000),
(uint32_t)(exclusiveAccessTimeMicros % 1000),
(uint32_t)(meanExclusiveAccessIdleTimeMicros / 1000),
(uint32_t)(meanExclusiveAccessIdleTimeMicros % 1000),
env->getExclusiveAccessHaltedThreads(),
env->getLastExclusiveAccessResponder(),
env->exclusiveAccessBeatenByOtherThread());
@@ -637,7 +646,7 @@ MM_Scheduler::restartMutatorsAndWait(MM_EnvironmentRealtime *env)
}

bool
MM_Scheduler::shouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack)
MM_Scheduler::shouldGCYield(MM_EnvironmentRealtime *env, uint64_t timeSlack)
{
return internalShouldGCYield(env, timeSlack);
}
@@ -652,14 +661,14 @@ MM_Scheduler::shouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack)
* @return true if the GC thread should yield, false otherwise
*/
MMINLINE bool
MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack)
MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, uint64_t timeSlack)
{
if (_completeCurrentGCSynchronouslyMainThreadCopy) {
/* If we have degraded to a synchronous GC, don't yield until finished */
return false;
}
/* Be harmless when called indirectly on mutator thread */
if (env->getThreadType() == MUTATOR_THREAD) {
if (MUTATOR_THREAD == env->getThreadType()) {
return false;
}
/* The GC does not have to yield when ConcurrentTracing or ConcurrentSweeping is
@@ -679,9 +688,9 @@ MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack)
return false;
}

I_64 nanosLeft = _utilTracker->getNanosLeft(env, getStartTimeOfCurrentGCSlice());
int64_t nanosLeft = _utilTracker->getNanosLeft(env, getStartTimeOfCurrentGCSlice());
if (nanosLeft > 0) {
if ((U_64)nanosLeft > timeSlack) {
if ((uint64_t)nanosLeft > timeSlack) {
return false;
}
}
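Taken together with the early-outs above, the method yields only when the remaining slice budget no longer exceeds the caller's slack. A small sketch restating just that final decision (a simplification, not the full OMR code path):

#include <cstdint>
#include <cstdio>

/* Sketch of the final check in internalShouldGCYield(): keep collecting
 * only while the remaining budget comfortably exceeds the slack. */
static bool shouldYieldSketch(int64_t nanosLeft, uint64_t timeSlack)
{
	if ((nanosLeft > 0) && ((uint64_t)nanosLeft > timeSlack)) {
		return false; /* enough budget left in this GC slice */
	}
	return true; /* budget used up (or within slack): yield to mutators */
}

int main()
{
	printf("%d\n", shouldYieldSketch(500000, 100000)); /* 0: 0.5ms left vs 0.1ms slack */
	printf("%d\n", shouldYieldSketch(50000, 100000));  /* 1: slack swallows the remainder */
	return 0;
}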
@@ -690,7 +699,7 @@ MM_Scheduler::internalShouldGCYield(MM_EnvironmentRealtime *env, U_64 timeSlack)
}

bool
MM_Scheduler::condYieldFromGCWrapper(MM_EnvironmentBase *env, U_64 timeSlack)
MM_Scheduler::condYieldFromGCWrapper(MM_EnvironmentBase *env, uint64_t timeSlack)
{
return condYieldFromGC(env, timeSlack);
}
@@ -706,7 +715,7 @@ MM_Scheduler::condYieldFromGCWrapper(MM_EnvironmentBase *env, U_64 timeSlack)
* @return true if yielding actually occurred, false otherwise
*/
bool
MM_Scheduler::condYieldFromGC(MM_EnvironmentBase *envBase, U_64 timeSlack)
MM_Scheduler::condYieldFromGC(MM_EnvironmentBase *envBase, uint64_t timeSlack)
{
MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(envBase);

@@ -762,33 +771,23 @@ void MM_Scheduler::yieldFromGC(MM_EnvironmentRealtime *env, bool distanceChecked
}
}

void
MM_Scheduler::run(MM_EnvironmentBase *env, MM_Task *task, uintptr_t newThreadCount)
{
uintptr_t activeThreads = recomputeActiveThreadCountForTask(env, task, newThreadCount);
task->mainSetup(env);
prepareThreadsForTask(env, task, activeThreads);
acceptTask(env);
task->run(env);
completeTask(env);
cleanupAfterTask(env);
task->mainCleanup(env);
}

void
MM_Scheduler::prepareThreadsForTask(MM_EnvironmentBase *env, MM_Task *task, uintptr_t threadCount)
{
omrthread_monitor_enter(_workerThreadMutex);
_workerThreadsReservedForGC = true;

uintptr_t activeThreads = recomputeActiveThreadCountForTask(env, task, threadCount);

task->mainSetup(env);
task->setSynchronizeMutex(_synchronizeMutex);

for (uintptr_t index=0; index < threadCount; index++) {
for (uintptr_t index = 0; index < activeThreads; index++) {
_statusTable[index] = worker_status_reserved;
_taskTable[index] = task;
}

wakeUpThreads(threadCount);
wakeUpThreads(activeThreads);
omrthread_monitor_exit(_workerThreadMutex);

pushYieldCollaborator(((MM_IncrementalParallelTask *)task)->getYieldCollaborator());
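With the run() override deleted, the recomputation of the active thread count and task->mainSetup() now happen inside prepareThreadsForTask(), and only that many workers are woken and given status/task table entries. A sketch of the main thread's resulting dispatch sequence, reconstructed from the deleted run() body (the driver function is hypothetical and access control is simplified):

/* Hypothetical driver; method names as in the diff, visibility simplified. */
void
dispatchTaskSketch(MM_EnvironmentBase *env, MM_Scheduler *scheduler, MM_Task *task, uintptr_t threadCount)
{
	/* Now recomputes the active count, runs task->mainSetup(env),
	 * and wakes exactly that many workers. */
	scheduler->prepareThreadsForTask(env, task, threadCount);
	scheduler->acceptTask(env);   /* main thread joins the task */
	task->run(env);
	scheduler->completeTask(env);
	scheduler->cleanupAfterTask(env);
	task->mainCleanup(env);
}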
@@ -829,7 +828,7 @@ MM_Scheduler::startUpThreads()

/* Now that the GC threads are started, it is safe to start the alarm thread */
_alarmThread = MM_MetronomeAlarmThread::newInstance(&env);
if (_alarmThread == NULL) {
if (NULL == _alarmThread) {
omrtty_printf("Unable to initialize alarm thread for time-based GC scheduling\n");
omrtty_printf("Most likely cause is non-supported version of OS\n");
return false;
@@ -877,13 +876,13 @@ MM_Scheduler::workerEntryPoint(MM_EnvironmentBase *envModron)

omrthread_monitor_enter(_workerThreadMutex);

while(worker_status_dying != _statusTable[workerID]) {
while (worker_status_dying != _statusTable[workerID]) {
/* Wait for a task to be dispatched to the worker thread */
while(worker_status_waiting == _statusTable[workerID]) {
while (worker_status_waiting == _statusTable[workerID]) {
omrthread_monitor_wait(_workerThreadMutex);
}

if(worker_status_reserved == _statusTable[workerID]) {
if (worker_status_reserved == _statusTable[workerID]) {
/* Found a task to dispatch to - do prep work for dispatch */
acceptTask(env);
omrthread_monitor_exit(_workerThreadMutex);
@@ -895,6 +894,7 @@ MM_Scheduler::workerEntryPoint(MM_EnvironmentBase *envModron)
completeTask(env);
}
}

omrthread_monitor_exit(_workerThreadMutex);
}
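The loop above treats _statusTable[workerID] as a small per-worker state machine. Sketched as an enum for orientation (the names come from the diff; the actual declaration lives elsewhere in the dispatcher and may differ):

/* Assumed shape of the worker status values used above. */
enum WorkerStatusSketch {
	worker_status_waiting,  /* parked in omrthread_monitor_wait() until woken */
	worker_status_reserved, /* task dispatched: acceptTask(), run it, completeTask() */
	worker_status_dying     /* shutdown requested: leave workerEntryPoint() */
};
/* Transitions: waiting -> reserved -> (task runs) -> waiting ... -> dying */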

@@ -1113,7 +1113,7 @@ MM_Scheduler::incrementMutatorCount()
extern "C" {

void
j9gc_startGCIfTimeExpired(OMR_VMThread* vmThread)
j9gc_startGCIfTimeExpired(OMR_VMThread *vmThread)
{
MM_EnvironmentRealtime *env = MM_EnvironmentRealtime::getEnvironment(vmThread);
MM_Scheduler *scheduler = (MM_Scheduler *)env->getExtensions()->dispatcher;
