Skip to content

Commit

Permalink
Compact trigger adjustment
Browse files Browse the repository at this point in the history
Dark matter and paging compact triggers currently compare only the potential
free-memory gains against current free memory (with the aim of reducing Global
GC frequency). Although the trigger thresholds are set rather high, they can
still fire easily when Tenure is fairly full.

To account for the cost of the work performed by the compaction, the gains are
now compared not just with total free memory but with total heap size as well.

That alone would make the trigger far less likely to fire, so as a
compromise: the weight of total heap size factored into the formula is
set to 50%, and the thresholds are cut by roughly a factor of 2.

Signed-off-by: Aleksandar Micic <amicic@ca.ibm.com>
  • Loading branch information
amicic committed Nov 15, 2019
1 parent dd23311 commit 538fd9b
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 26 deletions.
4 changes: 2 additions & 2 deletions gc/base/GCExtensionsBase.hpp
Expand Up @@ -1427,7 +1427,7 @@ class MM_GCExtensionsBase : public MM_BaseVirtual {
, heapAlignment(HEAP_ALIGNMENT)
, absoluteMinimumOldSubSpaceSize(MINIMUM_OLD_SPACE_SIZE)
, absoluteMinimumNewSubSpaceSize(MINIMUM_NEW_SPACE_SIZE)
, darkMatterCompactThreshold((float)0.40)
, darkMatterCompactThreshold((float)0.15)
, parSweepChunkSize(0)
, heapExpansionMinimumSize(1024 * 1024)
, heapExpansionMaximumSize(0)
Expand Down Expand Up @@ -1764,7 +1764,7 @@ class MM_GCExtensionsBase : public MM_BaseVirtual {
, idleMinimumFree(0)
, gcOnIdle(false)
, compactOnIdle(false)
, gcOnIdleCompactThreshold((float)0.25)
, gcOnIdleCompactThreshold((float)0.10)
#endif /* defined(OMR_GC_IDLE_HEAP_MANAGER) */
#if defined(OMR_VALGRIND_MEMCHECK)
, valgrindMempoolAddr(0)
Expand Down
43 changes: 19 additions & 24 deletions gc/base/standard/ParallelGlobalGC.cpp
Expand Up @@ -718,48 +718,43 @@ MM_ParallelGlobalGC::shouldCompactThisCycle(MM_EnvironmentBase *env, MM_Allocate
}

{
MM_MemoryPool *memoryPool= _extensions->heap->getDefaultMemorySpace()->getTenureMemorySubSpace()->getMemoryPool();
/* Tenure space dark matter trigger */
MM_MemorySubSpace *memorySubSpace = _extensions->heap->getDefaultMemorySpace()->getTenureMemorySubSpace();
uintptr_t totalSize = memorySubSpace->getActiveMemorySize();
MM_MemoryPool *memoryPool= memorySubSpace->getMemoryPool();
uintptr_t darkMatterBytes = 0;
if (!_extensions->concurrentSweep) {
darkMatterBytes = memoryPool->getDarkMatterBytes();
}
uintptr_t freeMemorySize = memoryPool->getActualFreeMemorySize();
float darkMatterRatio = ((float)darkMatterBytes)/((float)freeMemorySize);
float darkMatterRatio = ((float)darkMatterBytes)/((float)freeMemorySize + (float)totalSize / 2);

float darkMatterThreshold = _extensions->getDarkMatterCompactThreshold();

if (darkMatterRatio > darkMatterThreshold) {
if (darkMatterRatio > _extensions->getDarkMatterCompactThreshold()) {
compactReason = COMPACT_MICRO_FRAG;
goto compactionReqd;
}
}

#if defined(OMR_GC_IDLE_HEAP_MANAGER)
if ((J9MMCONSTANT_EXPLICIT_GC_IDLE_GC == gcCode.getCode()) && (_extensions->gcOnIdle)){
if ((J9MMCONSTANT_EXPLICIT_GC_IDLE_GC == gcCode.getCode()) && (_extensions->gcOnIdle)){

MM_MemoryPool *memoryPool= _extensions->heap->getDefaultMemorySpace()->getTenureMemorySubSpace()->getMemoryPool();
MM_LargeObjectAllocateStats *stats = memoryPool->getLargeObjectAllocateStats();
MM_LargeObjectAllocateStats *stats = memoryPool->getLargeObjectAllocateStats();

uintptr_t pageSize = env->getExtensions()->heap->getPageSize();
uintptr_t freeMemory = stats->getFreeMemory();
uintptr_t reusableFreeMemory = stats->getPageAlignedFreeMemory(pageSize);
uintptr_t pageSize = env->getExtensions()->heap->getPageSize();
uintptr_t reusableFreeMemory = stats->getPageAlignedFreeMemory(pageSize);

uintptr_t darkMatter = 0;
if (!_extensions->concurrentSweep){
darkMatter = memoryPool->getDarkMatterBytes();
}
uintptr_t memoryFragmentationDiff = freeMemory - reusableFreeMemory;
uintptr_t totalFragmentation = memoryFragmentationDiff + darkMatter;
float totalFragmentationRatio = ((float)totalFragmentation)/((float)freeMemory);
uintptr_t memoryFragmentationDiff = freeMemorySize - reusableFreeMemory;
uintptr_t totalFragmentation = memoryFragmentationDiff + darkMatterBytes;
float totalFragmentationRatio = ((float)totalFragmentation)/((float)freeMemorySize + (float)totalSize / 2);

Trc_ParallelGlobalGC_shouldCompactThisCycle(env->getLanguageVMThread(), totalFragmentationRatio, _extensions->gcOnIdleCompactThreshold);
Trc_ParallelGlobalGC_shouldCompactThisCycle(env->getLanguageVMThread(), totalFragmentationRatio, _extensions->gcOnIdleCompactThreshold);

if (totalFragmentationRatio > _extensions->gcOnIdleCompactThreshold) {
compactReason = COMPACT_PAGE;
goto compactionReqd;
if (totalFragmentationRatio > _extensions->gcOnIdleCompactThreshold) {
compactReason = COMPACT_PAGE;
goto compactionReqd;
}
}
#endif /* OMR_GC_IDLE_HEAP_MANAGER */
}
#endif /* OMR_GC_IDLE_HEAP_MANAGER */


nocompact:
Expand Down

0 comments on commit 538fd9b

Please sign in to comment.