Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions src/coreclr/gc/gc.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2619,6 +2619,7 @@ size_t gc_heap::eph_gen_starts_size = 0;
heap_segment* gc_heap::segment_standby_list;
#endif //USE_REGIONS
bool gc_heap::use_large_pages_p = 0;
bool gc_heap::large_pages_fake_mode_p = 0;
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

probably better to name this large_pages_force_mode or something.

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd prefer large_pages_simulation_mode_p or large_pages_emulation_mode_p

#ifdef HEAP_BALANCE_INSTRUMENTATION
size_t gc_heap::last_gc_end_time_us = 0;
#endif //HEAP_BALANCE_INSTRUMENTATION
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/gc/gcconfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ class GCConfigStringHolder
BOOL_CONFIG (ConfigLogEnabled, "GCConfigLogEnabled", NULL, false, "Specifies the name of the GC config log file") \
BOOL_CONFIG (GCNumaAware, "GCNumaAware", NULL, true, "Enables numa allocations in the GC") \
BOOL_CONFIG (GCCpuGroup, "GCCpuGroup", "System.GC.CpuGroup", false, "Enables CPU groups in the GC") \
BOOL_CONFIG (GCLargePages, "GCLargePages", "System.GC.LargePages", false, "Enables using Large Pages in the GC") \
INT_CONFIG (GCLargePages, "GCLargePages", "System.GC.LargePages", 0, "Enables Large Pages in the GC (1=real large pages, 2=fake mode for testing)") \
INT_CONFIG (HeapVerifyLevel, "HeapVerify", NULL, HEAPVERIFY_NONE, "When set verifies the integrity of the managed heap on entry and exit of each GC") \
INT_CONFIG (LOHCompactionMode, "GCLOHCompact", NULL, 0, "Specifies the LOH compaction mode") \
INT_CONFIG (LOHThreshold, "GCLOHThreshold", "System.GC.LOHThreshold", LARGE_OBJECT_SIZE, "Specifies the size that will make objects go on LOH") \
Expand Down
4 changes: 3 additions & 1 deletion src/coreclr/gc/gcpriv.h
Original file line number Diff line number Diff line change
Expand Up @@ -2485,7 +2485,7 @@ class gc_heap
PER_HEAP_METHOD void decommit_heap_segment (heap_segment* seg);
PER_HEAP_ISOLATED_METHOD bool virtual_alloc_commit_for_heap (void* addr, size_t size, int h_number);
PER_HEAP_ISOLATED_METHOD bool virtual_commit (void* address, size_t size, int bucket, int h_number=-1, bool* hard_limit_exceeded_p=NULL);
PER_HEAP_ISOLATED_METHOD bool virtual_decommit (void* address, size_t size, int bucket, int h_number=-1, void* end_of_data=nullptr);
PER_HEAP_ISOLATED_METHOD bool virtual_decommit (void* address, size_t size, int bucket, int h_number=-1);
PER_HEAP_ISOLATED_METHOD void reduce_committed_bytes (void* address, size_t size, int bucket, int h_number, bool decommit_succeeded_p);
friend void destroy_card_table (uint32_t*);
PER_HEAP_ISOLATED_METHOD void destroy_card_table_helper (uint32_t* c_table);
Expand Down Expand Up @@ -5369,7 +5369,9 @@ class gc_heap
#endif

// Indicate to use large pages. This only works if hardlimit is also enabled.
// GCLargePages=1 uses real OS large pages, GCLargePages=2 fakes it for testing.
PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool use_large_pages_p;
PER_HEAP_ISOLATED_FIELD_INIT_ONLY bool large_pages_fake_mode_p;

#ifdef MULTIPLE_HEAPS
// Init-ed in gc_heap::initialize_gc
Expand Down
19 changes: 16 additions & 3 deletions src/coreclr/gc/init.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -882,13 +882,23 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
// Right now all the non mark array portions are committed since I'm calling make_card_table
// on the whole range. This can be committed as needed.
size_t reserve_size = regions_range;
uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_large_pages_p);
// In fake large pages mode, use normal reserve (not real large pages) then
// commit all upfront to simulate the "always committed" property.
bool use_real_large_pages = use_large_pages_p && !large_pages_fake_mode_p;
uint8_t* reserve_range = (uint8_t*)virtual_alloc (reserve_size, use_real_large_pages);
if (!reserve_range)
{
log_init_error_to_host ("Reserving %zd bytes (%zd GiB) for the regions range failed, do you have a virtual memory limit set on this process?",
reserve_size, gib (reserve_size));
return E_OUTOFMEMORY;
}
if (large_pages_fake_mode_p)
{
if (!GCToOSInterface::VirtualCommit (reserve_range, reserve_size))
{
return E_OUTOFMEMORY;
}
}

if (!global_region_allocator.init (reserve_range, (reserve_range + reserve_size),
((size_t)1 << min_segment_size_shr),
Expand All @@ -909,8 +919,9 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
heap_hard_limit_oh[soh] &&
(GCConfig::GetGCHeapHardLimitPOH() == 0) &&
(GCConfig::GetGCHeapHardLimitPOHPercent() == 0);
bool use_real_large_pages = use_large_pages_p && !large_pages_fake_mode_p;
if (!reserve_initial_memory (soh_segment_size, loh_segment_size, poh_segment_size, number_of_heaps,
use_large_pages_p, separated_poh_p, heap_no_to_numa_node))
use_real_large_pages, separated_poh_p, heap_no_to_numa_node))
return E_OUTOFMEMORY;
if (use_large_pages_p)
{
Expand Down Expand Up @@ -1279,7 +1290,9 @@ bool gc_heap::compute_hard_limit()
heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH();

#ifdef HOST_64BIT
use_large_pages_p = GCConfig::GetGCLargePages();
int64_t large_pages_config = GCConfig::GetGCLargePages();
use_large_pages_p = (large_pages_config != 0);
large_pages_fake_mode_p = (large_pages_config == 2);
#endif //HOST_64BIT

if (heap_hard_limit_oh[soh] || heap_hard_limit_oh[loh] || heap_hard_limit_oh[poh])
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/gc/interface.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -275,7 +275,7 @@ HRESULT GCHeap::Initialize()
{
return CLR_E_GC_LARGE_PAGE_MISSING_HARD_LIMIT;
}
GCConfig::SetGCLargePages(gc_heap::use_large_pages_p);
GCConfig::SetGCLargePages(gc_heap::use_large_pages_p ? (gc_heap::large_pages_fake_mode_p ? 2 : 1) : 0);

#ifdef USE_REGIONS
gc_heap::regions_range = (size_t)GCConfig::GetGCRegionRange();
Expand Down
28 changes: 18 additions & 10 deletions src/coreclr/gc/memory.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ void gc_heap::reduce_committed_bytes (void* address, size_t size, int bucket, in
}
}

bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_number, void* end_of_data)
bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_number)
{
/**
* Here are all possible cases for the decommits:
Expand All @@ -171,15 +171,12 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_nu
* Case 3: This is for free - the bucket will be recorded_committed_free_bucket, and the h_number will be -1
*/

bool decommit_succeeded_p = ((bucket != recorded_committed_bookkeeping_bucket) && use_large_pages_p) ? true : GCToOSInterface::VirtualDecommit (address, size);
// With large pages, VirtualDecommit on heap memory is a no-op. All such callers
// should either skip the decommit or handle stale data themselves (decommit_region
// does the latter by calling reduce_committed_bytes directly and clearing memory).
assert (!use_large_pages_p || bucket == recorded_committed_bookkeeping_bucket);

// Large pages: the decommit above is a no-op so memory retains stale data.
// Clear up to end_of_data if the caller provided it so that the heap never
// observes leftover object references after the region is reused.
if (use_large_pages_p && (end_of_data != nullptr) && (end_of_data > address))
{
memclr ((uint8_t*)address, (uint8_t*)end_of_data - (uint8_t*)address);
}
bool decommit_succeeded_p = GCToOSInterface::VirtualDecommit (address, size);
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

think you need a similar fix in gc_heap::decommit_region ?


reduce_committed_bytes (address, size, bucket, h_number, decommit_succeeded_p);

Expand Down Expand Up @@ -347,7 +344,18 @@ size_t gc_heap::decommit_region (heap_segment* region, int bucket, int h_number)
uint8_t* page_start = align_lower_page (get_region_start (region));
uint8_t* decommit_end = heap_segment_committed (region);
size_t decommit_size = decommit_end - page_start;
bool decommit_succeeded_p = virtual_decommit (page_start, decommit_size, bucket, h_number);
bool decommit_succeeded_p;
if (use_large_pages_p)
{
// VirtualDecommit is a no-op for large pages so skip it and update
// committed bookkeeping directly. Memory clearing is handled below.
decommit_succeeded_p = true;
reduce_committed_bytes (page_start, decommit_size, bucket, h_number, true);
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If decommit is a noop, why are we reducing comitted bytes?

}
else
{
decommit_succeeded_p = virtual_decommit (page_start, decommit_size, bucket, h_number);
}
bool require_clearing_memory_p = !decommit_succeeded_p || use_large_pages_p;
dprintf (REGIONS_LOG, ("decommitted region %p(%p-%p) (%zu bytes) - success: %d",
region,
Expand Down
52 changes: 32 additions & 20 deletions src/coreclr/gc/regions_segments.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1522,6 +1522,12 @@ size_t gc_heap::decommit_heap_segment_pages_worker (heap_segment* seg,
//decommit all pages except one or 2
void gc_heap::decommit_heap_segment (heap_segment* seg)
{
// For large pages, VirtualDecommit is a no-op so skip the decommit entirely
// to avoid lowering committed/used bookkeeping while memory retains stale data.
if (use_large_pages_p)
{
return;
}
#ifdef USE_REGIONS
if (!dt_high_memory_load_p())
{
Expand Down Expand Up @@ -1814,32 +1820,38 @@ void gc_heap::distribute_free_regions()
while (decommit_step(DECOMMIT_TIME_STEP_MILLISECONDS))
{
}
#ifdef MULTIPLE_HEAPS
for (int i = 0; i < n_heaps; i++)
// For large pages, VirtualDecommit on in-use regions is a no-op so the
// memory is never actually returned to the OS. Skip the tail decommit
// entirely to avoid misleading bookkeeping and unnecessary memclr overhead.
if (!use_large_pages_p)
{
gc_heap* hp = g_heaps[i];
int hn = i;
#ifdef MULTIPLE_HEAPS
for (int i = 0; i < n_heaps; i++)
{
gc_heap* hp = g_heaps[i];
int hn = i;
#else //MULTIPLE_HEAPS
{
gc_heap* hp = pGenGCHeap;
int hn = 0;
#endif //MULTIPLE_HEAPS
for (int i = 0; i < total_generation_count; i++)
{
generation* generation = hp->generation_of (i);
heap_segment* region = heap_segment_rw (generation_start_segment (generation));
while (region != nullptr)
gc_heap* hp = pGenGCHeap;
int hn = 0;
#endif //MULTIPLE_HEAPS
for (int i = 0; i < total_generation_count; i++)
{
uint8_t* aligned_allocated = align_on_page (heap_segment_allocated (region));
size_t end_space = heap_segment_committed (region) - aligned_allocated;
if (end_space > 0)
generation* generation = hp->generation_of (i);
heap_segment* region = heap_segment_rw (generation_start_segment (generation));
while (region != nullptr)
{
virtual_decommit (aligned_allocated, end_space, gen_to_oh (i), hn, heap_segment_used (region));
heap_segment_committed (region) = aligned_allocated;
heap_segment_used (region) = min (heap_segment_used (region), heap_segment_committed (region));
assert (heap_segment_committed (region) > heap_segment_mem (region));
uint8_t* aligned_allocated = align_on_page (heap_segment_allocated (region));
size_t end_space = heap_segment_committed (region) - aligned_allocated;
if (end_space > 0)
{
virtual_decommit (aligned_allocated, end_space, gen_to_oh (i), hn);
heap_segment_committed (region) = aligned_allocated;
heap_segment_used (region) = min (heap_segment_used (region), heap_segment_committed (region));
assert (heap_segment_committed (region) > heap_segment_mem (region));
}
region = heap_segment_next_rw (region);
}
region = heap_segment_next_rw (region);
}
}
}
Expand Down
96 changes: 96 additions & 0 deletions src/tests/GC/API/GC/Collect_Aggressive_LargePages.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System;
using System.Collections.Concurrent;
using System.Runtime.CompilerServices;
using System.Threading;
using Xunit;

// Regression test for https://github.com/dotnet/runtime/issues/126903
// Verifies that aggressive GC does not corrupt the heap under (fake) large pages.
// The fake large pages mode (DOTNET_GCLargePages=2, set in the accompanying .csproj)
// exercises the same GC code paths as real large pages without requiring OS-level
// large page setup.
public class AggressiveCollectLargePages
{
    // Total duration of the stress run; the CancellationTokenSource fires after this.
    const int DurationMs = 3000;
    // Number of threads concurrently mutating the shared dictionary.
    const int WriterCount = 4;

    // Entry point: spins up WriterCount allocator threads plus one thread issuing
    // aggressive, compacting full GCs, runs them for DurationMs, then reports.
    // Returns 100 on success (test harness convention), 101 if any writer observed
    // an exception (taken as evidence of heap corruption).
    [Fact]
    public static int TestEntryPoint()
    {
        var dict = new ConcurrentDictionary<int, byte[]>();
        var cts = new CancellationTokenSource(DurationMs);
        var token = cts.Token;
        // Incremented via Interlocked from writer threads; read after all joins.
        int errors = 0;

        Thread[] writers = new Thread[WriterCount];
        for (int t = 0; t < WriterCount; t++)
        {
            // Copy the loop variable so each lambda captures its own thread id.
            int tid = t;
            writers[t] = new Thread(() =>
            {
                try
                {
                    // Disjoint key ranges per writer avoid key collisions between threads.
                    int i = tid * 1_000_000;
                    while (!token.IsCancellationRequested)
                    {
                        dict[i] = new byte[100];
                        i++;
                        // Periodically drop all entries to generate garbage and churn.
                        if ((i % 1000) == 0)
                        {
                            dict.Clear();
                        }
                    }
                }
                catch (Exception ex)
                {
                    // Any exception here (e.g. from a corrupted object reference)
                    // is treated as a test failure.
                    Console.WriteLine($"Writer {tid} caught: {ex.GetType().Name}: {ex.Message}");
                    Interlocked.Increment(ref errors);
                }
            });
            writers[t].IsBackground = true;
            writers[t].Start();
        }

        // Dedicated thread repeatedly forcing aggressive compacting full collections
        // while the writers allocate, maximizing region decommit/reuse traffic.
        Thread gcThread = new Thread(() =>
        {
            while (!token.IsCancellationRequested)
            {
                CreateGarbage();
                GC.Collect(2, GCCollectionMode.Aggressive, blocking: true, compacting: true);
                Thread.Sleep(50);
            }
        });
        gcThread.IsBackground = true;
        gcThread.Start();

        // All threads observe the same token, so every Join completes shortly
        // after DurationMs elapses.
        gcThread.Join();
        for (int t = 0; t < WriterCount; t++)
        {
            writers[t].Join();
        }

        if (errors > 0)
        {
            Console.WriteLine($"FAIL: {errors} writer(s) hit exceptions (heap corruption).");
            return 101;
        }

        Console.WriteLine("PASS: No heap corruption detected.");
        return 100;
    }

    // Allocates a burst of SOH-sized arrays plus one large (LOH) array so each
    // collection has fresh garbage in multiple generations/heaps.
    // NoInlining keeps the locals' lifetimes confined to this frame.
    [MethodImpl(MethodImplOptions.NoInlining)]
    static void CreateGarbage()
    {
        byte[][] small = new byte[500][];
        for (int i = 0; i < small.Length; i++)
        {
            small[i] = new byte[4000];
        }
        byte[] large = new byte[8 * 1024 * 1024];
        GC.KeepAlive(small);
        GC.KeepAlive(large);
    }
}
18 changes: 18 additions & 0 deletions src/tests/GC/API/GC/Collect_Aggressive_LargePages.csproj
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <!-- Needed for CLRTestEnvironmentVariable -->
    <RequiresProcessIsolation>true</RequiresProcessIsolation>
    <CLRTestTargetUnsupported Condition="'$(RuntimeFlavor)' != 'coreclr'">true</CLRTestTargetUnsupported>
    <CLRTestPriority>0</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>PdbOnly</DebugType>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Collect_Aggressive_LargePages.cs" />
  </ItemGroup>
  <ItemGroup>
    <!-- GCLargePages=2 selects the fake large-pages mode (1 = real OS large
         pages); GCHeapHardLimit is required because large pages only work when
         a hard limit is configured. -->
    <CLRTestEnvironmentVariable Include="DOTNET_GCLargePages" Value="2" />
    <CLRTestEnvironmentVariable Include="DOTNET_GCHeapHardLimit" Value="0xC0000000" />
  </ItemGroup>
</Project>
Loading