Decommit region tails (#66008)
I observed that with regions, gen 1 is often much smaller per heap than a single region. So it makes sense to decommit the tail end of the last region in an ephemeral generation, guided by the budget for that generation.

To implement this, I reactivated decommit_target for regions and made decommit_step call decommit_ephemeral_segment_pages_step, which in the regions case needs to synchronize with the allocator. This is done by taking the more space lock.
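
The shape of that synchronization matters: decommit_ephemeral_segment_pages_step runs on GC thread 0 outside of a GC, so it must not block waiting for the allocator (a blocking acquire there can deadlock at the start of a GC, as a comment in the diff below explains). A minimal standalone sketch of the try-lock pattern, with a std::atomic_flag as a hypothetical stand-in for the real more_space_lock_soh spin lock:

#include <atomic>
#include <cstddef>

// Hypothetical stand-in for the GC's more space lock; the commit itself
// uses try_enter_spin_lock/leave_spin_lock on more_space_lock_soh.
static std::atomic_flag more_space_lock = ATOMIC_FLAG_INIT;

static bool try_enter_spin_lock() { return !more_space_lock.test_and_set(std::memory_order_acquire); }
static void leave_spin_lock()     { more_space_lock.clear(std::memory_order_release); }

// Shape of the gen 0 decommit step: if the allocator holds the lock, skip
// this tick instead of blocking - a blocking enter_spin_lock here could
// deadlock at the start of a GC, since this code runs on GC thread 0.
static size_t decommit_step_gen0_sketch()
{
    if (!try_enter_spin_lock())
        return 0;   // allocator is active, retry on the next timer tick
    // ... re-read tail region, decommit target, committed and allocated,
    //     then decommit at most one max_decommit_step_size chunk ...
    leave_spin_lock();
    return 0;       // bytes decommitted in this step
}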

Note that with default settings, this decommitting logic will usually apply only to gen 1, because gen 0 is normally larger than a region. It can still kick in for gen 0, though, if gen 0 has pins and thus already has enough space to satisfy its budget; in that case we decommit the tail end of the last region in gen 0.
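
The heart of the change is the per-generation target computation in decommit_ephemeral_segment_pages (the hunk at @@ -39573 below). A condensed, self-contained sketch of the same arithmetic, with raw pointers standing in for the heap_segment accessors:

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Condensed sketch of the tail-region decommit target computation; names
// mirror the commit, but the region is reduced to its raw bounds. The
// caller only reaches this when budget_gen < 0 (a surplus).
uint8_t* compute_decommit_target(uint8_t* region_mem,        // start of region
                                 uint8_t* region_reserved,   // end of reserved range
                                 uint8_t* region_allocated,  // end of allocations
                                 uint8_t* previous_target,   // target from last GC
                                 ptrdiff_t budget_gen)       // negative => surplus
{
    // budget_gen < 0 means the generation already has more space than the
    // budget requires; the surplus is what we can afford to decommit.
    ptrdiff_t tail_region_size   = region_reserved - region_mem;
    ptrdiff_t unneeded_tail_size = std::min(-budget_gen, tail_region_size);

    uint8_t* decommit_target = region_reserved - unneeded_tail_size;
    decommit_target = std::max(decommit_target, region_allocated); // never cut into live data

    if (decommit_target < previous_target)
    {
        // exponential smoothing: new target = 1/3 new + 2/3 previous,
        // written to avoid overflow exactly as in the commit
        ptrdiff_t target_decrease = previous_target - decommit_target;
        decommit_target += target_decrease * 2 / 3;
    }
    return decommit_target;
}

The smoothing damps oscillation: if, say, the raw target comes out 6 MB below the previous one, the published target drops by only 2 MB this GC, and the rest follows on later GCs if the surplus persists.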
PeterSolMS committed Mar 7, 2022
1 parent 0022738 commit 942430c
Showing 2 changed files with 147 additions and 48 deletions.
184 changes: 143 additions & 41 deletions src/coreclr/gc/gc.cpp
@@ -11431,6 +11431,7 @@ void gc_heap::init_heap_segment (heap_segment* seg, gc_heap* hp
heap_segment_plan_allocated (seg) = heap_segment_mem (seg);
heap_segment_allocated (seg) = heap_segment_mem (seg);
heap_segment_saved_allocated (seg) = heap_segment_mem (seg);
heap_segment_decommit_target (seg) = heap_segment_reserved (seg);
#ifdef BACKGROUND_GC
heap_segment_background_allocated (seg) = 0;
heap_segment_saved_bg_allocated (seg) = 0;
@@ -11546,6 +11547,7 @@ void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
{
if (use_large_pages_p)
return;

uint8_t* page_start = align_on_page (heap_segment_allocated(seg));
assert (heap_segment_committed (seg) >= page_start);

@@ -11561,12 +11563,6 @@
size_t gc_heap::decommit_heap_segment_pages_worker (heap_segment* seg,
uint8_t* new_committed)
{
#ifdef USE_REGIONS
if (!dt_high_memory_load_p())
{
return 0;
}
#endif
assert (!use_large_pages_p);
uint8_t* page_start = align_on_page (new_committed);
ptrdiff_t size = heap_segment_committed (seg) - page_start;
@@ -12351,8 +12347,7 @@ void gc_heap::distribute_free_regions()
heap_budget_in_region_units[i][large_free_region] = 0;
for (int gen = soh_gen0; gen < total_generation_count; gen++)
{
ptrdiff_t budget_gen = hp->estimate_gen_growth (gen);
assert (budget_gen >= 0);
ptrdiff_t budget_gen = max (hp->estimate_gen_growth (gen), 0);
int kind = gen >= loh_generation;
size_t budget_gen_in_region_units = (budget_gen + (region_size[kind] - 1)) / region_size[kind];
dprintf (REGIONS_LOG, ("h%2d gen %d has an estimated growth of %Id bytes (%Id regions)", i, gen, budget_gen, budget_gen_in_region_units));
@@ -12499,7 +12494,6 @@ void gc_heap::distribute_free_regions()
}

#ifdef MULTIPLE_HEAPS
gradual_decommit_in_progress_p = FALSE;
for (int kind = basic_free_region; kind < count_free_region_kinds; kind++)
{
if (global_regions_to_decommit[kind].get_num_free_regions() != 0)
@@ -22142,7 +22136,7 @@ void gc_heap::garbage_collect (int n)
}

descr_generations ("BEGIN");
#ifdef TRACE_GC
#if defined(TRACE_GC) && defined(USE_REGIONS)
if (heap_number == 0)
{
#ifdef MULTIPLE_HEAPS
@@ -22166,7 +22160,7 @@
}
}
}
#endif // TRACE_GC
#endif // TRACE_GC && USE_REGIONS

#ifdef VERIFY_HEAP
if ((GCConfig::GetHeapVerifyLevel() & GCConfig::HEAPVERIFY_GC) &&
@@ -30221,7 +30215,7 @@ heap_segment* gc_heap::find_first_valid_region (heap_segment* region, bool compa
set_region_plan_gen_num (current_region, plan_gen_num);
}

if (gen_num != 0)
if (gen_num >= soh_gen2)
{
dprintf (REGIONS_LOG, (" gen%d decommit end of region %Ix(%Ix)",
gen_num, current_region, heap_segment_mem (current_region)));
@@ -39562,7 +39556,7 @@ ptrdiff_t gc_heap::estimate_gen_growth (int gen_number)
gen_number, heap_number, budget_gen, new_allocation_gen, free_list_space_gen));
#endif //USE_REGIONS

return max(0, budget_gen);
return budget_gen;
}

void gc_heap::decommit_ephemeral_segment_pages()
@@ -39573,14 +39567,71 @@ void gc_heap::decommit_ephemeral_segment_pages()
}

#if defined(MULTIPLE_HEAPS) && defined(USE_REGIONS)
// for regions, this is done at the regions level
return;
for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++)
{
generation *gen = generation_of (gen_number);
heap_segment* tail_region = generation_tail_region (gen);
uint8_t* previous_decommit_target = heap_segment_decommit_target (tail_region);

// reset the decommit targets to make sure we don't decommit inadvertently
for (heap_segment* region = generation_start_segment_rw (gen); region != nullptr; region = heap_segment_next (region))
{
heap_segment_decommit_target (region) = heap_segment_reserved (region);
}

ptrdiff_t budget_gen = estimate_gen_growth (gen_number) + loh_size_threshold;

if (budget_gen >= 0)
{
// we need more than the regions we have - nothing to decommit
continue;
}

// we may have too much committed - let's see if we can decommit in the tail region
ptrdiff_t tail_region_size = heap_segment_reserved (tail_region) - heap_segment_mem (tail_region);
ptrdiff_t unneeded_tail_size = min (-budget_gen, tail_region_size);
uint8_t *decommit_target = heap_segment_reserved (tail_region) - unneeded_tail_size;
decommit_target = max (decommit_target, heap_segment_allocated (tail_region));

if (decommit_target < previous_decommit_target)
{
// we used to have a higher target - do exponential smoothing by computing
// essentially decommit_target = 1/3*decommit_target + 2/3*previous_decommit_target
// computation below is slightly different to avoid overflow
ptrdiff_t target_decrease = previous_decommit_target - decommit_target;
decommit_target += target_decrease * 2 / 3;
}

//#define STRESS_DECOMMIT 1
#ifdef STRESS_DECOMMIT
// our decommit logic should work for a random decommit target within tail_region - make sure it does
decommit_target = heap_segment_mem (tail_region) + gc_rand::get_rand (heap_segment_reserved (tail_region) - heap_segment_mem (tail_region));
#endif //STRESS_DECOMMIT

heap_segment_decommit_target (tail_region) = decommit_target;

if (decommit_target < heap_segment_committed (tail_region))
{
gradual_decommit_in_progress_p = TRUE;

dprintf (1, ("h%2d gen %d reduce_commit by %IdkB",
heap_number,
gen_number,
(heap_segment_committed (tail_region) - decommit_target)/1024));
}
dprintf(3, ("h%2d gen %d allocated: %IdkB committed: %IdkB target: %IdkB",
heap_number,
gen_number,
(heap_segment_allocated (tail_region) - heap_segment_mem (tail_region))/1024,
(heap_segment_committed (tail_region) - heap_segment_mem (tail_region))/1024,
(decommit_target - heap_segment_mem (tail_region))/1024));
}
#else //MULTIPLE_HEAPS && USE_REGIONS

dynamic_data* dd0 = dynamic_data_of (0);

ptrdiff_t desired_allocation = dd_new_allocation (dd0) +
estimate_gen_growth (soh_gen1) +
max (estimate_gen_growth (soh_gen1), 0) +
loh_size_threshold;

size_t slack_space =
@@ -39687,7 +39738,11 @@ bool gc_heap::decommit_step ()
}
}
}
#else //USE_REGIONS
if (use_large_pages_p)
{
return (decommit_size != 0);
}
#endif //USE_REGIONS
#ifdef MULTIPLE_HEAPS
// should never get here for large pages because decommit_ephemeral_segment_pages
// will not do anything if use_large_pages_p is true
@@ -39699,46 +39754,93 @@
decommit_size += hp->decommit_ephemeral_segment_pages_step ();
}
#endif //MULTIPLE_HEAPS
#endif //USE_REGIONS
return (decommit_size != 0);
}

#ifdef MULTIPLE_HEAPS
// return the decommitted size
#ifndef USE_REGIONS
size_t gc_heap::decommit_ephemeral_segment_pages_step ()
{
// we rely on desired allocation not being changed outside of GC
assert (ephemeral_heap_segment->saved_desired_allocation == dd_desired_allocation (dynamic_data_of (0)));

uint8_t* decommit_target = heap_segment_decommit_target (ephemeral_heap_segment);
size_t EXTRA_SPACE = 2 * OS_PAGE_SIZE;
decommit_target += EXTRA_SPACE;
uint8_t* committed = heap_segment_committed (ephemeral_heap_segment);
if (decommit_target < committed)
size_t size = 0;
#ifdef USE_REGIONS
for (int gen_number = soh_gen0; gen_number <= soh_gen1; gen_number++)
{
generation* gen = generation_of (gen_number);
heap_segment* seg = generation_tail_region (gen);
#else // USE_REGIONS
{
// we rely on other threads not messing with committed if we are about to trim it down
assert (ephemeral_heap_segment->saved_committed == heap_segment_committed (ephemeral_heap_segment));
heap_segment* seg = ephemeral_heap_segment;
// we rely on desired allocation not being changed outside of GC
assert (seg->saved_desired_allocation == dd_desired_allocation (dynamic_data_of (0)));
#endif // USE_REGIONS

// how much would we need to decommit to get to decommit_target in one step?
size_t full_decommit_size = (committed - decommit_target);
uint8_t* decommit_target = heap_segment_decommit_target (seg);
size_t EXTRA_SPACE = 2 * OS_PAGE_SIZE;
decommit_target += EXTRA_SPACE;
#ifdef STRESS_DECOMMIT
// our decommit logic should work for a random decommit target within tail_region - make sure it does
// tail region now may be different from what decommit_ephemeral_segment_pages saw
decommit_target = heap_segment_mem (seg) + gc_rand::get_rand (heap_segment_reserved (seg) - heap_segment_mem (seg));
#endif //STRESS_DECOMMIT
uint8_t* committed = heap_segment_committed (seg);
uint8_t* allocated = (seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg);
if ((allocated <= decommit_target) && (decommit_target < committed))
{
#ifdef USE_REGIONS
if (gen_number == soh_gen0)
{
// for gen 0, sync with the allocator by taking the more space lock
// and re-read the variables
//
// we call try_enter_spin_lock here instead of enter_spin_lock because
// calling enter_spin_lock from this thread can deadlock at the start
// of a GC - if gc_started is already true, we call wait_for_gc_done(),
// but we are on GC thread 0, so GC cannot make progress
if (!try_enter_spin_lock (&more_space_lock_soh))
{
continue;
}
add_saved_spinlock_info (false, me_acquire, mt_decommit_step);
seg = generation_tail_region (gen);
#ifndef STRESS_DECOMMIT
decommit_target = heap_segment_decommit_target (seg);
decommit_target += EXTRA_SPACE;
#endif
committed = heap_segment_committed (seg);
allocated = (seg == ephemeral_heap_segment) ? alloc_allocated : heap_segment_allocated (seg);
}
if ((allocated <= decommit_target) && (decommit_target < committed))
#else // USE_REGIONS
// we rely on other threads not messing with committed if we are about to trim it down
assert (seg->saved_committed == heap_segment_committed (seg));
#endif // USE_REGIONS
{
// how much would we need to decommit to get to decommit_target in one step?
size_t full_decommit_size = (committed - decommit_target);

// don't do more than max_decommit_step_size per step
size_t decommit_size = min (max_decommit_step_size, full_decommit_size);
// don't do more than max_decommit_step_size per step
size_t decommit_size = min (max_decommit_step_size, full_decommit_size);

// figure out where the new committed should be
uint8_t* new_committed = (committed - decommit_size);
size_t size = decommit_heap_segment_pages_worker (ephemeral_heap_segment, new_committed);
// figure out where the new committed should be
uint8_t* new_committed = (committed - decommit_size);
size += decommit_heap_segment_pages_worker (seg, new_committed);

#ifdef _DEBUG
ephemeral_heap_segment->saved_committed = committed - size;
seg->saved_committed = committed - size;
#endif // _DEBUG

return size;
}
#ifdef USE_REGIONS
if (gen_number == soh_gen0)
{
// for gen 0, we took the more space lock - leave it again
add_saved_spinlock_info (false, me_release, mt_decommit_step);
leave_spin_lock (&more_space_lock_soh);
}
#endif // USE_REGIONS
}
}
return 0;
return size;
}
#endif //!USE_REGIONS
#endif //MULTIPLE_HEAPS

//This is meant to be called by decide_on_compacting.
11 changes: 4 additions & 7 deletions src/coreclr/gc/gcpriv.h
@@ -1036,7 +1036,8 @@ enum msl_take_state
mt_alloc_large_cant,
mt_try_alloc,
mt_try_budget,
mt_try_servo_budget
mt_try_servo_budget,
mt_decommit_step
};

enum msl_enter_state
@@ -2012,10 +2013,10 @@ class gc_heap
void reset_heap_segment_pages (heap_segment* seg);
PER_HEAP
void decommit_heap_segment_pages (heap_segment* seg, size_t extra_space);
#if defined(MULTIPLE_HEAPS) && !defined(USE_REGIONS)
#if defined(MULTIPLE_HEAPS)
PER_HEAP
size_t decommit_ephemeral_segment_pages_step ();
#endif //MULTIPLE_HEAPS && !USE_REGIONS
#endif //MULTIPLE_HEAPS
PER_HEAP
size_t decommit_heap_segment_pages_worker (heap_segment* seg, uint8_t *new_committed);
PER_HEAP_ISOLATED
@@ -5596,9 +5597,7 @@ class heap_segment
size_t saved_desired_allocation;
#endif // _DEBUG
#endif //MULTIPLE_HEAPS
#if !defined(MULTIPLE_HEAPS) || !defined(USE_REGIONS)
uint8_t* decommit_target;
#endif //!MULTIPLE_HEAPS || !USE_REGIONS
uint8_t* plan_allocated;
// In the plan phase we change the allocated for a seg but we need this
// value to correctly calculate how much space we can reclaim in
@@ -5897,13 +5896,11 @@ uint8_t*& heap_segment_committed (heap_segment* inst)
{
return inst->committed;
}
#if !defined(MULTIPLE_HEAPS) || !defined(USE_REGIONS)
inline
uint8_t*& heap_segment_decommit_target (heap_segment* inst)
{
return inst->decommit_target;
}
#endif //!MULTIPLE_HEAPS || !USE_REGIONS
inline
uint8_t*& heap_segment_used (heap_segment* inst)
{
