diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 207a62355dc7..f7126f55c908 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -7868,18 +7868,12 @@ bool gc_heap::is_in_condemned_gc (uint8_t* o)
     return true;
 }
 
-// REGIONS TODO -
-// This method can be called by GCHeap::Promote/Relocate which means
-// it could be in the heap range but not actually in a valid region.
-// This would return true but find_object will return 0. But this
-// seems counter-intuitive so we should consider a better implementation.
+// This needs to check the range that's covered by bookkeeping because find_object will
+// need to look at the brick table.
 inline
-bool gc_heap::is_in_condemned (uint8_t* o)
+bool gc_heap::is_in_bookkeeping_range (uint8_t* o)
 {
-    if ((o >= g_gc_lowest_address) && (o < g_gc_highest_address))
-        return is_in_condemned_gc (o);
-    else
-        return false;
+    return ((o >= g_gc_lowest_address) && (o < bookkeeping_covered_committed));
 }
 
 inline
@@ -45433,7 +45427,7 @@ void GCHeap::Promote(Object** ppObject, ScanContext* sc, uint32_t flags)
     gc_heap* hp = gc_heap::heap_of (o);
 
 #ifdef USE_REGIONS
-    if (!gc_heap::is_in_condemned (o))
+    if (!gc_heap::is_in_bookkeeping_range (o) || !gc_heap::is_in_condemned_gc (o))
 #else //USE_REGIONS
     if ((o < hp->gc_low) || (o >= hp->gc_high))
 #endif //USE_REGIONS
@@ -45492,7 +45486,12 @@ void GCHeap::Relocate (Object** ppObject, ScanContext* sc,
     //dprintf (3, ("Relocate location %Ix\n", (size_t)ppObject));
     dprintf (3, ("R: %Ix", (size_t)ppObject));
 
-    if (!object || !((object >= g_gc_lowest_address) && (object < g_gc_highest_address)))
+    if (!object
+#ifdef USE_REGIONS
+        || !gc_heap::is_in_bookkeeping_range (object))
+#else //USE_REGIONS
+        || !((object >= g_gc_lowest_address) && (object < g_gc_highest_address)))
+#endif //USE_REGIONS
         return;
 
     gc_heap* hp = gc_heap::heap_of (object);
@@ -45947,17 +45946,16 @@ GCHeap::GetContainingObject (void *pInteriorPtr, bool fCollectedGenOnly)
     gc_heap* hp = gc_heap::heap_of (o);
 
 #ifdef USE_REGIONS
-    if (fCollectedGenOnly)
+    if (gc_heap::is_in_bookkeeping_range (o))
     {
-        if (!gc_heap::is_in_condemned (o))
+        if (fCollectedGenOnly && !gc_heap::is_in_condemned_gc (o))
         {
             return NULL;
         }
     }
     else
     {
-        if (!((o >= g_gc_lowest_address) && (o < g_gc_highest_address)))
-            return NULL;
+        return NULL;
     }
 
 #else //USE_REGIONS
diff --git a/src/coreclr/gc/gcpriv.h b/src/coreclr/gc/gcpriv.h
index c70ea95c85bd..4eb6a452b4e6 100644
--- a/src/coreclr/gc/gcpriv.h
+++ b/src/coreclr/gc/gcpriv.h
@@ -52,7 +52,7 @@ inline void FATAL_GC_ERROR()
 // This means any empty regions can be freely used for any generation. For
 // Server GC we will balance regions between heaps.
 // For now disable regions for StandAlone GC, NativeAOT and MacOS builds
-#if defined (HOST_64BIT) && !defined (BUILD_AS_STANDALONE) && !defined(__APPLE__) && !defined(FEATURE_NATIVEAOT)
+#if defined (HOST_64BIT) && !defined (BUILD_AS_STANDALONE) && !defined(__APPLE__)
 #define USE_REGIONS
 #endif //HOST_64BIT && BUILD_AS_STANDALONE
 
@@ -3127,7 +3127,7 @@ class gc_heap
     bool is_in_condemned_gc (uint8_t* o);
     // requires checking if o is in the heap range first.
     PER_HEAP_ISOLATED
-    bool is_in_condemned (uint8_t* o);
+    bool is_in_bookkeeping_range (uint8_t* o);
     PER_HEAP_ISOLATED
     bool should_check_brick_for_reloc (uint8_t* o);
 #endif //USE_REGIONS
diff --git a/src/coreclr/vm/gcenv.ee.cpp b/src/coreclr/vm/gcenv.ee.cpp
index 8b289e9c7442..5202e81c692b 100644
--- a/src/coreclr/vm/gcenv.ee.cpp
+++ b/src/coreclr/vm/gcenv.ee.cpp
@@ -274,6 +274,36 @@ void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen,
         STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
     }
 
+#ifdef FEATURE_GREENTHREADS
+    {
+        if (sc->promotion && green_head.next)
+        {
+            SuspendedGreenThread* current_green_thread = green_head.next;
+            while (current_green_thread != &green_tail)
+            {
+                GreenThreadStackList* current_thread_stack_segment = current_green_thread->currentThreadStackSegment;
+                while (current_thread_stack_segment->prev != nullptr)
+                {
+                    current_thread_stack_segment = current_thread_stack_segment->prev;
+                }
+                while (current_thread_stack_segment != nullptr)
+                {
+                    PTR_PTR_Object green_object_base = (PTR_PTR_Object)current_thread_stack_segment->stackRange.stackBase;
+                    PTR_PTR_Object green_object_limit = (PTR_PTR_Object)current_thread_stack_segment->stackRange.stackLimit;
+                    PTR_PTR_Object green_current = green_object_base;
+                    while (green_current > green_object_limit)
+                    {
+                        fn(green_current, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
+                        green_current--;
+                    }
+                    current_thread_stack_segment = current_thread_stack_segment->next;
+                }
+                current_green_thread = current_green_thread->next;
+            }
+        }
+    }
+#endif //FEATURE_GREENTHREADS
+
     // In server GC, we should be competing for marking the statics
     // It's better to do this *after* stack scanning, because this way
     // we can make up for imbalances in stack scanning
diff --git a/src/coreclr/vm/gcenv.ee.standalone.cpp b/src/coreclr/vm/gcenv.ee.standalone.cpp
index 9ad0c4c380e7..48a9eac8f36b 100644
--- a/src/coreclr/vm/gcenv.ee.standalone.cpp
+++ b/src/coreclr/vm/gcenv.ee.standalone.cpp
@@ -17,6 +17,8 @@
 #include "genanalysis.h"
 #include "eventpipeadapter.h"
 
+#include "greenthreads.h"
+
 // the method table for the WeakReference class
 extern MethodTable* pWeakReferenceMT;
 
diff --git a/src/coreclr/vm/gcenv.ee.static.cpp b/src/coreclr/vm/gcenv.ee.static.cpp
index ab8a3f22f8cb..72dd645d47f1 100644
--- a/src/coreclr/vm/gcenv.ee.static.cpp
+++ b/src/coreclr/vm/gcenv.ee.static.cpp
@@ -17,6 +17,8 @@
 #include "genanalysis.h"
 #include "eventpipeadapter.h"
 
+#include "greenthreads.h"
+
 // the method table for the WeakReference class
 extern MethodTable* pWeakReferenceMT;
 
diff --git a/src/coreclr/vm/greenthreads.cpp b/src/coreclr/vm/greenthreads.cpp
index 99c16f43de9b..1045b2f72235 100644
--- a/src/coreclr/vm/greenthreads.cpp
+++ b/src/coreclr/vm/greenthreads.cpp
@@ -17,8 +17,11 @@ struct GreenThreadData
     GreenThreadStackList *pStackListCurrent;
     bool inGreenThread;
     bool transitionedToOSThreadOnGreenThread;
+    SuspendedGreenThread* suspendedGreenThread;
 };
 
+SuspendedGreenThread green_head = {};
+SuspendedGreenThread green_tail = {};
 thread_local GreenThreadData t_greenThread;
 
@@ -161,9 +164,8 @@ SuspendedGreenThread* ProduceSuspendedGreenThreadStruct()
     if (t_greenThread.inGreenThread)
     {
         // This is a suspension scenario
-        SuspendedGreenThread *pNewSuspendedThread = (SuspendedGreenThread*)malloc(sizeof(SuspendedGreenThread));
+        SuspendedGreenThread* pNewSuspendedThread = t_greenThread.suspendedGreenThread;
         pNewSuspendedThread->currentStackPointer = t_greenThread.greenThreadStackCurrent;
-        pNewSuspendedThread->currentThreadStackSegment = t_greenThread.pStackListCurrent;
         pNewSuspendedThread->greenThreadFrame = t_greenThread.pFrameInGreenThread;
 
         CleanGreenThreadState();
@@ -241,7 +243,19 @@ bool GreenThread_Yield() // Attempt to yield out of green thread. If the yield f
         }
 
         GetThread()->m_pFrame = t_greenThread.pFrameInGreenThread->PtrNextFrame();
-        // TODO! Enable GC Tracking of green thread at this point.
+        // TODO: AndrewAu: What if we don't have sufficient memory for this?
+        SuspendedGreenThread *pNewSuspendedThread = (SuspendedGreenThread*)malloc(sizeof(SuspendedGreenThread));
+        pNewSuspendedThread->currentThreadStackSegment = t_greenThread.pStackListCurrent;
+        t_greenThread.suspendedGreenThread = pNewSuspendedThread;
+
+        // TODO: AndrewAu: Synchronization in case multiple threads yield/resume at the same time.
+        if (green_head.next == nullptr) { green_head.next = &green_tail; }
+        if (green_tail.prev == nullptr) { green_tail.prev = &green_head; }
+
+        pNewSuspendedThread->prev = green_tail.prev;
+        green_tail.prev->next = pNewSuspendedThread;
+        green_tail.prev = pNewSuspendedThread;
+        pNewSuspendedThread->next = &green_tail;
     }
 
     YieldOutOfGreenThreadHelper(&t_greenThread.osStackRange, t_greenThread.osStackCurrent, &t_greenThread.greenThreadStackCurrent);
@@ -250,7 +264,12 @@
     GCX_COOP();
 
     // At this point we've resumed, and the stack is now in the new state way, but the Frame chain is not hooked up.
-    // TODO! Disable separate GC tracking of green thread at this point
+    SuspendedGreenThread* pNewSuspendedThread = t_greenThread.suspendedGreenThread;
+    t_greenThread.suspendedGreenThread = nullptr;
+
+    pNewSuspendedThread->next->prev = pNewSuspendedThread->prev;
+    pNewSuspendedThread->prev->next = pNewSuspendedThread->next;
+    free(pNewSuspendedThread);
     t_greenThread.pFrameInGreenThread->UNSAFE_SetNextFrame(t_greenThread.pFrameInOSThread);
     GetThread()->m_pFrame = t_greenThread.pFrameInGreenThread;
 
@@ -306,8 +325,7 @@ SuspendedGreenThread* GreenThread_ResumeThread(SuspendedGreenThread* pSuspendedT
     t_greenThread.pStackListCurrent = pSuspendedThread->currentThreadStackSegment;
     t_greenThread.greenThreadStackCurrent = pSuspendedThread->currentStackPointer;
-
-    free(pSuspendedThread);
+    t_greenThread.suspendedGreenThread = pSuspendedThread;
 
     ResumeSuspendedThreadHelper();
     return ProduceSuspendedGreenThreadStruct();
 
diff --git a/src/coreclr/vm/greenthreads.h b/src/coreclr/vm/greenthreads.h
index 92bf1805094e..c62014d3bb4d 100644
--- a/src/coreclr/vm/greenthreads.h
+++ b/src/coreclr/vm/greenthreads.h
@@ -22,8 +22,10 @@ struct GreenThreadStackList
 struct SuspendedGreenThread
 {
     uint8_t* currentStackPointer;
-    GreenThreadStackList *currentThreadStackSegment;
+    GreenThreadStackList* currentThreadStackSegment;
     Frame* greenThreadFrame;
+    SuspendedGreenThread* prev;
+    SuspendedGreenThread* next;
 };
 
 typedef uintptr_t (*TakesOneParam)(uintptr_t param);
@@ -41,4 +43,8 @@
 void DestroyGreenThread(SuspendedGreenThread* pSuspendedThread);
 // bool GreenThread_IsGreenThread();
 
+// TODO: AndrewAu: Better naming
+extern SuspendedGreenThread green_head;
+extern SuspendedGreenThread green_tail;
+
 #endif // GREENTHREADS_H
\ No newline at end of file
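Note on the suspended-green-thread tracking introduced above: green_head and green_tail act as sentinel nodes, GreenThread_Yield links a SuspendedGreenThread in front of the tail before it yields, the resume path unlinks and frees it, and GcScanRoots walks the list from head to tail to find green thread stacks whose slots it reports as pinned interior roots. Below is a minimal standalone sketch of just that list discipline (lazy sentinel linking, insert before tail, unlink on resume); Node is a hypothetical stand-in for SuspendedGreenThread and the printf walk stands in for the GC scan, so this is an illustration rather than runtime code.

```cpp
#include <cassert>
#include <cstdio>

// Hypothetical stand-in for SuspendedGreenThread; only the link fields matter here.
struct Node
{
    int   id   = 0;
    Node* prev = nullptr;
    Node* next = nullptr;
};

// Sentinels, mirroring green_head/green_tail in greenthreads.cpp.
static Node green_head;
static Node green_tail;

// Mirrors the yield path: lazily link the sentinels once, then append before the tail.
static void LinkSuspended(Node* n)
{
    if (green_head.next == nullptr) { green_head.next = &green_tail; }
    if (green_tail.prev == nullptr) { green_tail.prev = &green_head; }

    n->prev = green_tail.prev;
    green_tail.prev->next = n;
    green_tail.prev = n;
    n->next = &green_tail;
}

// Mirrors the resume path: splice the node back out of the list.
static void UnlinkSuspended(Node* n)
{
    n->next->prev = n->prev;
    n->prev->next = n->next;
    n->prev = n->next = nullptr;
}

// Mirrors the GcScanRoots walk: visit every node between the sentinels.
static void ScanSuspended()
{
    for (Node* cur = green_head.next; cur != nullptr && cur != &green_tail; cur = cur->next)
    {
        std::printf("would scan suspended green thread %d\n", cur->id);
    }
}

int main()
{
    Node a; a.id = 1;
    Node b; b.id = 2;

    LinkSuspended(&a);               // first yield: also links the sentinels
    LinkSuspended(&b);               // second yield
    ScanSuspended();                 // visits 1 then 2

    UnlinkSuspended(&a);             // thread 1 resumes
    assert(green_head.next == &b && green_tail.prev == &b);
    ScanSuspended();                 // visits 2 only

    UnlinkSuspended(&b);             // thread 2 resumes; list is empty again
    assert(green_head.next == &green_tail && green_tail.prev == &green_head);
    return 0;
}
```

The sentinel nodes keep insert and unlink free of null checks once they are linked; as the TODO in the diff notes, the real list still needs synchronization if multiple OS threads can yield or resume green threads concurrently.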