diff --git a/gc/base/EnvironmentBase.cpp b/gc/base/EnvironmentBase.cpp index d3e3ed582d8..8e034f75ee6 100644 --- a/gc/base/EnvironmentBase.cpp +++ b/gc/base/EnvironmentBase.cpp @@ -370,7 +370,7 @@ MM_EnvironmentBase::isExclusiveAccessRequestWaiting() } bool -MM_EnvironmentBase::acquireExclusiveVMAccessForGC(MM_Collector *collector, bool failIfNotFirst, bool flushCaches) +MM_EnvironmentBase::acquireExclusiveVMAccessForGC(MM_Collector *collector, bool failIfNotFirst) { MM_GCExtensionsBase *extensions = getExtensions(); uintptr_t collectorAccessCount = collector->getExclusiveAccessCount(); @@ -452,10 +452,6 @@ MM_EnvironmentBase::acquireExclusiveVMAccessForGC(MM_Collector *collector, bool collector->incrementExclusiveAccessCount(); - if (flushCaches) { - GC_OMRVMInterface::flushCachesForGC(this); - } - return !_exclusiveAccessBeatenByOtherThread; } diff --git a/gc/base/EnvironmentBase.hpp b/gc/base/EnvironmentBase.hpp index c3882c11be7..eaf13f2e0e8 100644 --- a/gc/base/EnvironmentBase.hpp +++ b/gc/base/EnvironmentBase.hpp @@ -465,7 +465,7 @@ class MM_EnvironmentBase : public MM_BaseVirtual * @note this call should be considered a safe-point as the thread may release VM access to allow the other threads to acquire exclusivity. * @note this call supports recursion. */ - bool acquireExclusiveVMAccessForGC(MM_Collector *collector, bool failIfNotFirst = false, bool flushCaches = true); + bool acquireExclusiveVMAccessForGC(MM_Collector *collector, bool failIfNotFirst = false); /** * Release exclusive access. 
diff --git a/gc/base/MemorySubSpaceFlat.cpp b/gc/base/MemorySubSpaceFlat.cpp index adc2371ff06..33a6b7a56bc 100644 --- a/gc/base/MemorySubSpaceFlat.cpp +++ b/gc/base/MemorySubSpaceFlat.cpp @@ -92,7 +92,7 @@ MM_MemorySubSpaceFlat::allocationRequestFailed(MM_EnvironmentBase* env, MM_Alloc if (_collector) { allocateDescription->saveObjects(env); /* acquire exclusive access and, after we get it, see if we need to perform a collect or if someone else beat us to it */ - if (!env->acquireExclusiveVMAccessForGC(_collector, true, true)) { + if (!env->acquireExclusiveVMAccessForGC(_collector, true)) { allocateDescription->restoreObjects(env); /* Beaten to exclusive access for our collector by another thread - a GC must have occurred. This thread * does NOT have exclusive access at this point. Try and satisfy the allocate based on a GC having occurred. diff --git a/gc/base/MemorySubSpaceGenerational.cpp b/gc/base/MemorySubSpaceGenerational.cpp index 6f35c334e99..8f8733e93ff 100644 --- a/gc/base/MemorySubSpaceGenerational.cpp +++ b/gc/base/MemorySubSpaceGenerational.cpp @@ -88,7 +88,7 @@ MM_MemorySubSpaceGenerational::allocationRequestFailed(MM_EnvironmentBase *env, } allocateDescription->saveObjects(env); - if (!env->acquireExclusiveVMAccessForGC(_collector, true, true)) { + if (!env->acquireExclusiveVMAccessForGC(_collector, true)) { allocateDescription->restoreObjects(env); Trc_MM_MSSGenerational_allocationRequestFailed(env->getLanguageVMThread(), allocateDescription->getBytesRequested(), 2); addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, baseSubSpace); diff --git a/gc/base/MemorySubSpaceSemiSpace.cpp b/gc/base/MemorySubSpaceSemiSpace.cpp index f6925ab40b1..3bdca939f3a 100644 --- a/gc/base/MemorySubSpaceSemiSpace.cpp +++ b/gc/base/MemorySubSpaceSemiSpace.cpp @@ -96,7 +96,7 @@ MM_MemorySubSpaceSemiSpace::allocationRequestFailed(MM_EnvironmentBase *env, MM_ void *addr = NULL; allocateDescription->saveObjects(env); - if 
(!env->acquireExclusiveVMAccessForGC(_collector, true, true)) { + if (!env->acquireExclusiveVMAccessForGC(_collector, true)) { allocateDescription->restoreObjects(env); Trc_MM_MSSSS_allocationRequestFailed(env->getLanguageVMThread(), allocateDescription->getBytesRequested(), 1); addr = allocateGeneric(env, allocateDescription, allocationType, objectAllocationInterface, _memorySubSpaceAllocate); diff --git a/gc/base/standard/ConcurrentGC.cpp b/gc/base/standard/ConcurrentGC.cpp index ef5ed9f3756..8cc649aa741 100644 --- a/gc/base/standard/ConcurrentGC.cpp +++ b/gc/base/standard/ConcurrentGC.cpp @@ -67,6 +67,7 @@ #include "MemorySubSpaceFlat.hpp" #include "MemorySubSpaceSemiSpace.hpp" #include "ObjectModel.hpp" +#include "OMRVMInterface.hpp" #include "ParallelDispatcher.hpp" #include "SpinLimiter.hpp" #include "SublistIterator.hpp" @@ -2000,6 +2001,9 @@ MM_ConcurrentGC::internalPreCollect(MM_EnvironmentBase *env, MM_MemorySubSpace * MM_ParallelGlobalGC::internalPreCollect(env, subSpace, allocDescription, gcCode); } else if (CONCURRENT_TRACE_ONLY <= executionModeAtGC) { /* We are going to complete the concurrent cycle to generate the GCStart/GCIncrement events */ + + GC_OMRVMInterface::flushCachesForGC(env); + reportGCStart(env); reportGCIncrementStart(env); reportGlobalGCIncrementStart(env); diff --git a/gc/base/standard/ConcurrentGC.hpp b/gc/base/standard/ConcurrentGC.hpp index bdc959cd6c7..56a22ebd0e0 100644 --- a/gc/base/standard/ConcurrentGC.hpp +++ b/gc/base/standard/ConcurrentGC.hpp @@ -376,7 +376,7 @@ class MM_ConcurrentGC : public MM_ParallelGlobalGC virtual bool acquireExclusiveVMAccessForCycleEnd(MM_EnvironmentBase *env) { - return env->acquireExclusiveVMAccessForGC(this, true, true); + return env->acquireExclusiveVMAccessForGC(this, true); } public: diff --git a/gc/base/standard/ConcurrentGCIncrementalUpdate.hpp b/gc/base/standard/ConcurrentGCIncrementalUpdate.hpp index 09d544ae51e..321b4424347 100644 --- a/gc/base/standard/ConcurrentGCIncrementalUpdate.hpp 
+++ b/gc/base/standard/ConcurrentGCIncrementalUpdate.hpp @@ -194,7 +194,7 @@ class MM_ConcurrentGCIncrementalUpdate : public MM_ConcurrentGC virtual bool acquireExclusiveVMAccessForCycleStart(MM_EnvironmentBase *env) { - return env->acquireExclusiveVMAccessForGC(this, true, false); + return env->acquireExclusiveVMAccessForGC(this, true); } public: diff --git a/gc/base/standard/ConcurrentGCSATB.hpp b/gc/base/standard/ConcurrentGCSATB.hpp index e24734cc14e..0615f0785dd 100644 --- a/gc/base/standard/ConcurrentGCSATB.hpp +++ b/gc/base/standard/ConcurrentGCSATB.hpp @@ -31,6 +31,7 @@ #if defined(OMR_GC_MODRON_CONCURRENT_MARK) && defined(OMR_GC_REALTIME) #include "ConcurrentGC.hpp" +#include "OMRVMInterface.hpp" /** * @todo Provide class documentation @@ -81,7 +82,14 @@ class MM_ConcurrentGCSATB : public MM_ConcurrentGC * SATB marks all newly allocated objectes during active concurrent cycle. * Since it's done on TLH granularity we have to flush the current ones and start creating new ones, once the cycle starts. 
*/ - return env->acquireExclusiveVMAccessForGC(this, true, true); + + bool didAcquire = env->acquireExclusiveVMAccessForGC(this, true); + + if (didAcquire) { + GC_OMRVMInterface::flushCachesForGC(env); + } + + return didAcquire; } void enableSATB(MM_EnvironmentBase *env); diff --git a/gc/base/standard/Scavenger.cpp b/gc/base/standard/Scavenger.cpp index 1874d8a650a..3a73452dae8 100644 --- a/gc/base/standard/Scavenger.cpp +++ b/gc/base/standard/Scavenger.cpp @@ -4240,6 +4240,9 @@ MM_Scavenger::mainThreadGarbageCollect(MM_EnvironmentBase *envBase, MM_AllocateD bool firstIncrement = true; #endif + /* Flush any VM level changes to prepare for a safe slot walk */ + GC_OMRVMInterface::flushCachesForGC(env); + if (firstIncrement) { if (_extensions->processLargeAllocateStats) { processLargeAllocateStatsBeforeGC(env); @@ -4580,9 +4583,6 @@ MM_Scavenger::internalPreCollect(MM_EnvironmentBase *env, MM_MemorySubSpace *sub } } } - - /* Flush any VM level changes to prepare for a safe slot walk */ - GC_OMRVMInterface::flushCachesForGC(env); } /**