Enable GC regions by default (dotnet#59283)
* Enable regions by default

* Rework ApproxTotalBytesInUse so it is safe to call during background GC

* one line fix

* initialize CurrentThread before initializing GC

Without this fix, enabling regions fails the AOT tests on checked builds.
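A minimal sketch of the ordering this bullet describes, with hypothetical routine names standing in for the real NativeAOT startup entry points:

// Hypothetical startup routines; the real NativeAOT entry points differ.
bool InitCurrentThread();  // attaches the current thread to the thread store
bool InitializeGC();       // brings up the GC; may call GCToEEInterface::GetThread()

bool RuntimeStartup()
{
    // Initialize the current thread first so that any GetThread() call made
    // while the GC is initializing sees a valid thread instead of hitting an
    // assert on checked builds.
    return InitCurrentThread() && InitializeGC();
}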

* use GetCurrentThreadIfAvailable within GCToEEInterface::GetThread

This is similar to the GCToEEInterface::GetThread implementation in the runtime.
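To illustrate the difference between the two accessors, here is a simplified model that assumes the current thread lives in a thread-local slot; this is a sketch of the pattern, not NativeAOT's actual ThreadStore code:

#include <cassert>

struct Thread { /* runtime thread state */ };

// Simplified model: the slot is null until the thread attaches to the runtime.
static thread_local Thread* t_pCurrentThread = nullptr;

// GetCurrentThread-style accessor: assumes a thread is already attached, so a
// call during early startup trips the assert on checked builds.
Thread* GetCurrentThread()
{
    assert(t_pCurrentThread != nullptr);
    return t_pCurrentThread;
}

// GetCurrentThreadIfAvailable-style accessor: returns whatever is in the slot,
// possibly null. This is the behavior GCToEEInterface::GetThread needs during
// GC initialization (see the gcrhenv.cpp hunk below).
Thread* GetCurrentThreadIfAvailable()
{
    return t_pCurrentThread;
}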

Co-authored-by: Maoni Stephens <maonis@ntdev.microsoft.com>
mangod9 and Maoni Stephens committed Jan 11, 2022
1 parent b389de5 commit cf91716
Showing 3 changed files with 98 additions and 44 deletions.
130 changes: 89 additions & 41 deletions src/coreclr/gc/gc.cpp
@@ -2522,6 +2522,8 @@ size_t gc_heap::bgc_loh_size_increased = 0;

size_t gc_heap::bgc_poh_size_increased = 0;

size_t gc_heap::background_soh_size_end_mark = 0;

size_t gc_heap::background_soh_alloc_count = 0;

size_t gc_heap::background_uoh_alloc_count = 0;
@@ -28892,6 +28894,14 @@ void gc_heap::plan_phase (int condemned_gen_number)
{
dprintf (2,( "**** Doing Compacting GC ****"));

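// When a compacting ephemeral GC runs while BGC is planning, fold the space
// it allocated at the end of the older generation's segments into
// background_soh_size_end_mark.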
#if defined(USE_REGIONS) && defined(BACKGROUND_GC)
if (should_update_end_mark_size())
{
background_soh_size_end_mark += generation_end_seg_allocated (older_gen) -
r_older_gen_end_seg_allocated;
}
#endif //USE_REGIONS && BACKGROUND_GC

#ifndef USE_REGIONS
if (should_expand)
{
@@ -29396,6 +29406,13 @@ void gc_heap::fix_generation_bounds (int condemned_gen_number,
}
}
#endif //MULTIPLE_HEAPS

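// Segments build (!USE_REGIONS): the generation bounds are fixed up at this
// point, so refresh background_soh_size_end_mark from gen2's bookkept size.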
#ifdef BACKGROUND_GC
if (should_update_end_mark_size())
{
background_soh_size_end_mark = generation_size (max_generation);
}
#endif //BACKGROUND_GC
#endif //!USE_REGIONS

{
@@ -29614,6 +29631,14 @@ void gc_heap::thread_final_regions (bool compact_p)
generation_final_regions[gen_idx].tail = generation_tail_region (gen);
}

#ifdef BACKGROUND_GC
heap_segment* max_gen_tail_region = 0;
if (should_update_end_mark_size())
{
max_gen_tail_region = generation_final_regions[max_generation].tail;
}
#endif //BACKGROUND_GC

// Step 2: for each region in the condemned generations, we thread it onto its planned generation
// in our generation_final_regions array.
for (int gen_idx = condemned_gen_number; gen_idx >= 0; gen_idx--)
@@ -29664,6 +29689,21 @@ }
}
}

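// Any regions threaded onto gen2 past the tail captured above are newly
// promoted into gen2; the block below folds their sizes into
// background_soh_size_end_mark.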
#ifdef BACKGROUND_GC
if (max_gen_tail_region)
{
max_gen_tail_region = heap_segment_next (max_gen_tail_region);

while (max_gen_tail_region)
{
background_soh_size_end_mark += heap_segment_allocated (max_gen_tail_region) -
heap_segment_mem (max_gen_tail_region);

max_gen_tail_region = heap_segment_next (max_gen_tail_region);
}
}
#endif //BACKGROUND_GC

// Step 4: if a generation doesn't have any regions, we need to get a new one for it;
// otherwise we just set the head region as the start region for that generation.
for (int gen_idx = 0; gen_idx <= max_generation; gen_idx++)
@@ -32933,6 +32973,11 @@ void gc_heap::decommit_mark_array_by_seg (heap_segment* seg)
}
}

bool gc_heap::should_update_end_mark_size()
{
return ((settings.condemned_generation == (max_generation - 1)) && (current_c_gc_state == c_gc_state_planning));
}
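// This predicate holds only while an ephemeral (gen1) GC runs during the
// planning state of a background GC, the only window in which gen1 can
// promote objects into gen2 and background_soh_size_end_mark needs updating.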

void gc_heap::background_mark_phase ()
{
verify_mark_array_cleared();
@@ -33007,6 +33052,7 @@ void gc_heap::background_mark_phase ()
bgc_begin_poh_size = total_poh_size;
bgc_loh_size_increased = 0;
bgc_poh_size_increased = 0;
background_soh_size_end_mark = 0;

dprintf (GTC_LOG, ("BM: h%d: loh: %Id, soh: %Id, poh: %Id", heap_number, total_loh_size, total_soh_size, total_poh_size));

@@ -33487,6 +33533,8 @@ void gc_heap::background_mark_phase ()
heap_segment_background_allocated (seg) = heap_segment_allocated (seg);
}

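// Accumulate the end-of-mark size of every SOH segment; ApproxTotalBytesInUse
// reads the total while BGC is in progress instead of walking the segment list.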
background_soh_size_end_mark += heap_segment_background_allocated (seg) - heap_segment_mem (seg);

dprintf (3333, ("h%d gen%d seg %Ix (%Ix) background allocated is %Ix",
heap_number, i, (size_t)(seg), heap_segment_mem (seg),
heap_segment_background_allocated (seg)));
@@ -45103,11 +45151,11 @@ size_t GCHeap::GetTotalBytesInUse ()
for (int i = 0; i < gc_heap::n_heaps; i++)
{
GCHeap* Hp = gc_heap::g_heaps [i]->vm_heap;
tot_size += Hp->ApproxTotalBytesInUse (FALSE);
tot_size += Hp->ApproxTotalBytesInUse();
}
return tot_size;
#else
return ApproxTotalBytesInUse ();
return ApproxTotalBytesInUse();
#endif //MULTIPLE_HEAPS
}

@@ -45162,58 +45210,58 @@ size_t GCHeap::ApproxTotalBytesInUse(BOOL small_heap_only)
size_t totsize = 0;
enter_spin_lock (&pGenGCHeap->gc_lock);

// the complication with the following code is that background GC may
// remove the ephemeral segment while we are iterating
// if so, we retry a couple times and ultimately may report a slightly wrong result
for (int tries = 1; tries <= 3; tries++)
// For gen0 it's a bit complicated because we are currently allocating in it. We get the fragmentation first
// just so that we don't give a negative number for the resulting size.
generation* gen = pGenGCHeap->generation_of (0);
size_t gen0_frag = generation_free_list_space (gen) + generation_free_obj_space (gen);
uint8_t* current_alloc_allocated = pGenGCHeap->alloc_allocated;
heap_segment* current_eph_seg = pGenGCHeap->ephemeral_heap_segment;
size_t gen0_size = 0;
#ifdef USE_REGIONS
heap_segment* gen0_seg = generation_start_segment (gen);
while (gen0_seg)
{
heap_segment* eph_seg = generation_allocation_segment (pGenGCHeap->generation_of (0));
// Get small block heap size info
totsize = (pGenGCHeap->alloc_allocated - heap_segment_mem (eph_seg));
heap_segment* seg1 = generation_start_segment (pGenGCHeap->generation_of (max_generation));
while ((seg1 != eph_seg) && (seg1 != nullptr)
#ifdef BACKGROUND_GC
&& (seg1 != pGenGCHeap->freeable_soh_segment)
#endif //BACKGROUND_GC
)
uint8_t* end = in_range_for_segment (current_alloc_allocated, gen0_seg) ?
current_alloc_allocated : heap_segment_allocated (gen0_seg);
gen0_size += end - heap_segment_mem (gen0_seg);

if (gen0_seg == current_eph_seg)
{
#ifdef BACKGROUND_GC
if (!heap_segment_decommitted_p (seg1))
#endif //BACKGROUND_GC
{
totsize += heap_segment_allocated (seg1) -
heap_segment_mem (seg1);
}
seg1 = heap_segment_next (seg1);
}
if (seg1 == eph_seg)
break;
}

gen0_seg = heap_segment_next (gen0_seg);
}
#else //USE_REGIONS
// For segments ephemeral seg does not change.
gen0_size = current_alloc_allocated - heap_segment_mem (current_eph_seg);
#endif //USE_REGIONS

//discount the fragmentation
for (int i = 0; i <= max_generation; i++)
totsize = gen0_size - gen0_frag;

int stop_gen_index = max_generation;

if (gc_heap::current_c_gc_state == c_gc_state_planning)
{
// Since BGC sweep can be deleting SOH segments concurrently, we avoid walking
// the segment list.
generation* oldest_gen = pGenGCHeap->generation_of (max_generation);
totsize = pGenGCHeap->background_soh_size_end_mark - generation_free_list_space (oldest_gen) - generation_free_obj_space (oldest_gen);
stop_gen_index--;
}

for (int i = (max_generation - 1); i <= stop_gen_index; i++)
{
generation* gen = pGenGCHeap->generation_of (i);
totsize -= (generation_free_list_space (gen) + generation_free_obj_space (gen));
totsize += pGenGCHeap->generation_size (i) - generation_free_list_space (gen) - generation_free_obj_space (gen);
}

if (!small_heap_only)
{
for (int i = uoh_start_generation; i < total_generation_count; i++)
{
heap_segment* seg2 = generation_start_segment (pGenGCHeap->generation_of (i));

while (seg2 != 0)
{
totsize += heap_segment_allocated (seg2) -
heap_segment_mem (seg2);
seg2 = heap_segment_next (seg2);
}

//discount the fragmentation
generation* uoh_gen = pGenGCHeap->generation_of (i);
size_t frag = generation_free_list_space (uoh_gen) + generation_free_obj_space (uoh_gen);
totsize -= frag;
generation* gen = pGenGCHeap->generation_of (i);
totsize += pGenGCHeap->generation_size (i) - generation_free_list_space (gen) - generation_free_obj_space (gen);
}
}
leave_spin_lock (&pGenGCHeap->gc_lock);
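Taken together, the ApproxTotalBytesInUse changes above stop walking a segment list that background GC may be mutating and instead account for the heap generation by generation. A condensed, self-contained sketch of the new accounting (field and helper names are illustrative, not the real gc_heap members):

#include <cstddef>

// Toy model of per-generation bookkeeping; names are illustrative.
struct GenInfo
{
    size_t size;             // bookkept generation size
    size_t free_list_space;  // fragmentation: free-list space ...
    size_t free_obj_space;   // ... plus free-object space
};

constexpr int max_generation = 2;          // gen2 is the oldest SOH generation
constexpr int uoh_start_generation = 3;    // LOH starts here
constexpr int total_generation_count = 5;  // gen0..gen2, LOH, POH

size_t approx_total_bytes_in_use(const GenInfo (&gens)[total_generation_count],
                                 size_t gen0_measured_size,
                                 size_t background_soh_size_end_mark,
                                 bool bgc_planning,
                                 bool small_heap_only)
{
    // gen0 is measured directly (with regions, by walking gen0's regions up to
    // the current allocation pointer) and its fragmentation is discounted up
    // front so the result cannot go negative while allocation is in flight.
    size_t totsize = gen0_measured_size
                   - gens[0].free_list_space - gens[0].free_obj_space;

    int stop_gen_index = max_generation;
    if (bgc_planning)
    {
        // BGC may be deleting SOH segments concurrently, so instead of walking
        // them, reuse the SOH size recorded at the end of background mark.
        totsize = background_soh_size_end_mark
                - gens[max_generation].free_list_space
                - gens[max_generation].free_obj_space;
        stop_gen_index--;  // the oldest generation is already accounted for
    }

    // Remaining SOH generations: bookkept size minus fragmentation.
    for (int i = max_generation - 1; i <= stop_gen_index; i++)
        totsize += gens[i].size - gens[i].free_list_space - gens[i].free_obj_space;

    // UOH (LOH/POH) generations are accounted the same way.
    if (!small_heap_only)
    {
        for (int i = uoh_start_generation; i < total_generation_count; i++)
            totsize += gens[i].size - gens[i].free_list_space - gens[i].free_obj_space;
    }
    return totsize;
}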
10 changes: 8 additions & 2 deletions src/coreclr/gc/gcpriv.h
@@ -51,8 +51,8 @@ inline void FATAL_GC_ERROR()
//
// This means any empty regions can be freely used for any generation. For
// Server GC we will balance regions between heaps.
// For now enable regions by default for only StandAlone GC builds
#if defined (HOST_64BIT) && defined (BUILD_AS_STANDALONE)
// For now, disable regions for StandAlone GC builds
#if defined (HOST_64BIT) && !defined (BUILD_AS_STANDALONE)
#define USE_REGIONS
#endif //HOST_64BIT && !BUILD_AS_STANDALONE

@@ -3440,6 +3440,9 @@ class gc_heap
PER_HEAP
void decommit_mark_array_by_seg (heap_segment* seg);

PER_HEAP_ISOLATED
bool should_update_end_mark_size();

PER_HEAP
void background_mark_phase();

@@ -4268,6 +4271,9 @@ class gc_heap
PER_HEAP
size_t bgc_poh_size_increased;

PER_HEAP
size_t background_soh_size_end_mark;

PER_HEAP
size_t background_soh_alloc_count;

2 changes: 1 addition & 1 deletion src/coreclr/nativeaot/Runtime/gcrhenv.cpp
@@ -923,7 +923,7 @@ void GCToEEInterface::DisablePreemptiveGC()
Thread* GCToEEInterface::GetThread()
{
#ifndef DACCESS_COMPILE
return ThreadStore::GetCurrentThread();
return ThreadStore::GetCurrentThreadIfAvailable();
#else
return NULL;
#endif
