// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// THREADS.CPP
//
//
//
#include "common.h"
#include "frames.h"
#include "threads.h"
#include "stackwalk.h"
#include "excep.h"
#include "comsynchronizable.h"
#include "log.h"
#include "gcheaputilities.h"
#include "mscoree.h"
#include "dbginterface.h"
#include "corprof.h" // profiling
#include "eeprofinterfaces.h"
#include "eeconfig.h"
#include "perfcounters.h"
#include "corhost.h"
#include "win32threadpool.h"
#include "jitinterface.h"
#include "eventtrace.h"
#include "comutilnative.h"
#include "finalizerthread.h"
#include "threadsuspend.h"
#include "wrappers.h"
#include "nativeoverlapped.h"
#include "mdaassistants.h"
#include "appdomain.inl"
#include "vmholder.h"
#include "exceptmacros.h"
#include "win32threadpool.h"
#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#include "interoputil.h"
#include "interoputil.inl"
#endif // FEATURE_COMINTEROP
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
#include "olecontexthelpers.h"
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
#ifdef FEATURE_PERFTRACING
#include "eventpipebuffermanager.h"
#endif // FEATURE_PERFTRACING
SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore);
CONTEXT *ThreadStore::s_pOSContext = NULL;
CLREvent *ThreadStore::s_pWaitForStackCrawlEvent;
#ifndef DACCESS_COMPILE
BOOL Thread::s_fCleanFinalizedThread = FALSE;
Volatile<LONG> Thread::s_threadPoolCompletionCountOverflow = 0;
CrstStatic g_DeadlockAwareCrst;
#if defined(_DEBUG)
BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId )
{
#ifndef FEATURE_PAL
LIMITED_METHOD_CONTRACT;
DWORD id = GetThreadId(h);
// The OS call GetThreadId may fail and return 0. In that case we cannot
// tell whether the two match, so we skip this check.
return id == 0 || id == osId;
#else // !FEATURE_PAL
return TRUE;
#endif // !FEATURE_PAL
}
#endif // _DEBUG
#ifdef _DEBUG_IMPL
template<> AutoCleanupGCAssert<TRUE>::AutoCleanupGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_COOPERATIVE;
}
template<> AutoCleanupGCAssert<FALSE>::AutoCleanupGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_PREEMPTIVE;
}
template<> void GCAssert<TRUE>::BeginGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_COOPERATIVE;
}
template<> void GCAssert<FALSE>::BeginGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_PREEMPTIVE;
}
#endif
// #define NEW_TLS 1
#ifdef _DEBUG
void Thread::SetFrame(Frame *pFrame)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
DEBUG_ONLY;
MODE_COOPERATIVE;
// It only makes sense for a Thread to call SetFrame on itself.
PRECONDITION(this == GetThread());
PRECONDITION(CheckPointer(pFrame));
}
CONTRACTL_END;
if (g_pConfig->fAssertOnFailFast())
{
Frame *pWalk = m_pFrame;
BOOL fExist = FALSE;
while (pWalk != (Frame*) -1)
{
if (pWalk == pFrame)
{
fExist = TRUE;
break;
}
pWalk = pWalk->m_Next;
}
pWalk = m_pFrame;
while (fExist && pWalk != pFrame && pWalk != (Frame*)-1)
{
if (pWalk->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
{
_ASSERTE (((ContextTransitionFrame *)pWalk)->GetReturnDomain() == m_pDomain);
}
pWalk = pWalk->m_Next;
}
}
m_pFrame = pFrame;
// If stack overrun corruptions are expected, then skip this check
// as the Frame chain may have been corrupted.
if (g_pConfig->fAssertOnFailFast() == false)
return;
Frame* espVal = (Frame*)GetCurrentSP();
while (pFrame != (Frame*) -1)
{
static Frame* stopFrame = 0;
if (pFrame == stopFrame)
_ASSERTE(!"SetFrame frame == stopFrame");
_ASSERTE(espVal < pFrame);
_ASSERTE(pFrame < m_CacheStackBase);
_ASSERTE(pFrame->GetFrameType() < Frame::TYPE_COUNT);
pFrame = pFrame->m_Next;
}
}
#endif // _DEBUG
//************************************************************************
// PRIVATE GLOBALS
//************************************************************************
extern unsigned __int64 getTimeStamp();
extern unsigned __int64 getTickFrequency();
unsigned __int64 tgetFrequency() {
static unsigned __int64 cachedFreq = (unsigned __int64) -1;
if (cachedFreq != (unsigned __int64) -1)
return cachedFreq;
else {
cachedFreq = getTickFrequency();
return cachedFreq;
}
}
#endif // #ifndef DACCESS_COMPILE
static StackWalkAction DetectHandleILStubsForDebugger_StackWalkCallback(CrawlFrame *pCF, VOID *pData)
{
WRAPPER_NO_CONTRACT;
// It suffices to wait for the first CrawlFrame with non-NULL function
MethodDesc *pMD = pCF->GetFunction();
if (pMD != NULL)
{
*(bool *)pData = pMD->IsILStub();
return SWA_ABORT;
}
return SWA_CONTINUE;
}
// This is really just a heuristic to detect if we are executing in an M2U IL stub or
// one of the marshaling methods it calls. It doesn't deal with U2M IL stubs.
// We loop through the frame chain looking for an uninitialized TransitionFrame.
// If there is one, then we are executing in an M2U IL stub or one of the methods it calls.
// On the other hand, if there is an initialized TransitionFrame, then we are not.
// Also, if there is an HMF on the stack, then we stop. This could be the case where
// an IL stub calls an FCALL which ends up in a managed method, and the debugger wants to
// stop in those cases. Some examples are COMException..ctor and custom marshalers.
//
// X86 IL stubs use InlinedCallFrame and are indistinguishable from ordinary methods with
// inlined P/Invoke when judging just from the frame chain. We use stack walk to decide
// this case.
bool Thread::DetectHandleILStubsForDebugger()
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
Frame* pFrame = GetFrame();
if (pFrame != NULL)
{
while (pFrame != FRAME_TOP)
{
// Check for HMF's. See the comment at the beginning of this function.
if (pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr())
{
break;
}
// If there is an entry frame (i.e. U2M managed), we should break.
else if (pFrame->GetFrameType() == Frame::TYPE_ENTRY)
{
break;
}
// Check for M2U transition frames. See the comment at the beginning of this function.
else if (pFrame->GetFrameType() == Frame::TYPE_EXIT)
{
if (pFrame->GetReturnAddress() == NULL)
{
// If the return address is NULL, then the frame has not been initialized yet.
// We may see InlinedCallFrame in ordinary methods as well. Have to do
// stack walk to find out if this is really an IL stub.
bool fInILStub = false;
StackWalkFrames(&DetectHandleILStubsForDebugger_StackWalkCallback,
&fInILStub,
QUICKUNWIND,
dac_cast<PTR_Frame>(pFrame));
if (fInILStub) return true;
}
else
{
// The frame is fully initialized.
return false;
}
}
pFrame = pFrame->Next();
}
}
return false;
}
extern "C" {
#ifndef __llvm__
__declspec(thread)
#else // !__llvm__
__thread
#endif // !__llvm__
ThreadLocalInfo gCurrentThreadInfo =
{
NULL, // m_pThread
NULL, // m_pAppDomain
NULL, // m_EETlsData
};
} // extern "C"
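// gCurrentThreadInfo is the per-thread block that GetThread()/GetAppDomain() read and that
// SetThread()/SetAppDomain() below write; InitThreadManager() publishes its TLS location to the
// debugger via g_TlsIndex.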
// index into TLS Array. Definition added by compiler
EXTERN_C UINT32 _tls_index;
#ifndef DACCESS_COMPILE
BOOL SetThread(Thread* t)
{
LIMITED_METHOD_CONTRACT
gCurrentThreadInfo.m_pThread = t;
return TRUE;
}
BOOL SetAppDomain(AppDomain* ad)
{
LIMITED_METHOD_CONTRACT
gCurrentThreadInfo.m_pAppDomain = ad;
return TRUE;
}
BOOL Thread::Alert ()
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
BOOL fRetVal = FALSE;
{
HANDLE handle = GetThreadHandle();
if (handle != INVALID_HANDLE_VALUE && handle != SWITCHOUT_HANDLE_VALUE)
{
fRetVal = ::QueueUserAPC(UserInterruptAPC, handle, APC_Code);
}
}
return fRetVal;
}
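// Join/JoinEx: wait on this thread's handle until it is signaled or the timeout elapses.
// When called on a managed thread, the wait is routed through DoAppropriateWait (so it can be
// alertable and forwarded to a SynchronizationContext); otherwise it falls back to
// WaitForSingleObjectEx.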
DWORD Thread::Join(DWORD timeout, BOOL alertable)
{
WRAPPER_NO_CONTRACT;
return JoinEx(timeout,alertable?WaitMode_Alertable:WaitMode_None);
}
DWORD Thread::JoinEx(DWORD timeout, WaitMode mode)
{
CONTRACTL {
THROWS;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
BOOL alertable = (mode & WaitMode_Alertable)?TRUE:FALSE;
Thread *pCurThread = GetThread();
_ASSERTE(pCurThread || dbgOnly_IsSpecialEEThread());
{
// We're not hosted, so WaitMode_InDeadlock is irrelevant. Clear it, so that this wait can be
// forwarded to a SynchronizationContext if needed.
mode = (WaitMode)(mode & ~WaitMode_InDeadlock);
HANDLE handle = GetThreadHandle();
if (handle == INVALID_HANDLE_VALUE || handle == SWITCHOUT_HANDLE_VALUE) {
return WAIT_FAILED;
}
if (pCurThread) {
return pCurThread->DoAppropriateWait(1, &handle, FALSE, timeout, mode);
}
else {
return WaitForSingleObjectEx(handle,timeout,alertable);
}
}
}
extern INT32 MapFromNTPriority(INT32 NTPriority);
BOOL Thread::SetThreadPriority(
int nPriority // thread priority level
)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
BOOL fRet;
{
if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
// When the thread starts running, we will set the thread priority.
fRet = TRUE;
}
else
fRet = ::SetThreadPriority(GetThreadHandle(), nPriority);
}
if (fRet)
{
GCX_COOP();
THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
if (pObject != NULL)
{
// TODO: managed ThreadPriority only supports up to 4.
pObject->SetPriority (MapFromNTPriority(nPriority));
}
}
return fRet;
}
int Thread::GetThreadPriority()
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
int nRetVal = -1;
if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
nRetVal = FALSE;
}
else
nRetVal = ::GetThreadPriority(GetThreadHandle());
return nRetVal;
}
void Thread::ChooseThreadCPUGroupAffinity()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
return;
//Borrow the ThreadStore Lock here: Lock ThreadStore before distributing threads
ThreadStoreLockHolder TSLockHolder(TRUE);
// this thread already has CPU group affinity set
if (m_pAffinityMask != 0)
return;
if (GetThreadHandle() == INVALID_HANDLE_VALUE)
return;
GROUP_AFFINITY groupAffinity;
CPUGroupInfo::ChooseCPUGroupAffinity(&groupAffinity);
CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL);
m_wCPUGroup = groupAffinity.Group;
m_pAffinityMask = groupAffinity.Mask;
}
void Thread::ClearThreadCPUGroupAffinity()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
return;
ThreadStoreLockHolder TSLockHolder(TRUE);
// this thread does not have CPU group affinity set
if (m_pAffinityMask == 0)
return;
GROUP_AFFINITY groupAffinity;
groupAffinity.Group = m_wCPUGroup;
groupAffinity.Mask = m_pAffinityMask;
CPUGroupInfo::ClearCPUGroupAffinity(&groupAffinity);
m_wCPUGroup = 0;
m_pAffinityMask = 0;
}
DWORD Thread::StartThread()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
DWORD dwRetVal = (DWORD) -1;
#ifdef _DEBUG
_ASSERTE (m_Creater.IsCurrentThread());
m_Creater.Clear();
#endif
_ASSERTE (GetThreadHandle() != INVALID_HANDLE_VALUE &&
GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
dwRetVal = ::ResumeThread(GetThreadHandle());
return dwRetVal;
}
// Class static data:
LONG Thread::m_DebugWillSyncCount = -1;
LONG Thread::m_DetachCount = 0;
LONG Thread::m_ActiveDetachCount = 0;
int Thread::m_offset_counter = 0;
Volatile<LONG> Thread::m_threadsAtUnsafePlaces = 0;
//-------------------------------------------------------------------------
// Public function: SetupThreadNoThrow()
// Creates a Thread for the current thread if one was not previously created.
// Returns NULL on failure (usually due to out-of-memory).
//-------------------------------------------------------------------------
Thread* SetupThreadNoThrow(HRESULT *pHR)
{
CONTRACTL {
NOTHROW;
SO_TOLERANT;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
HRESULT hr = S_OK;
Thread *pThread = GetThread();
if (pThread != NULL)
{
return pThread;
}
EX_TRY
{
pThread = SetupThread();
}
EX_CATCH
{
// We failed SetupThread. GET_EXCEPTION() may depend on Thread object.
if (__pException == NULL)
{
hr = E_OUTOFMEMORY;
}
else
{
hr = GET_EXCEPTION()->GetHR();
}
}
EX_END_CATCH(SwallowAllExceptions);
if (pHR)
{
*pHR = hr;
}
return pThread;
}
void DeleteThread(Thread* pThread)
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
//_ASSERTE (pThread == GetThread());
SetThread(NULL);
SetAppDomain(NULL);
if (pThread->HasThreadStateNC(Thread::TSNC_ExistInThreadStore))
{
pThread->DetachThread(FALSE);
}
else
{
#ifdef FEATURE_COMINTEROP
pThread->RevokeApartmentSpy();
#endif // FEATURE_COMINTEROP
FastInterlockOr((ULONG *)&pThread->m_State, Thread::TS_Dead);
// ~Thread() calls SafeSetThrowables which has a conditional contract
// which says that if you call it with a NULL throwable then it is
// MODE_ANY, otherwise MODE_COOPERATIVE. Scan doesn't understand that
// and assumes that we're violating the MODE_COOPERATIVE.
CONTRACT_VIOLATION(ModeViolation);
delete pThread;
}
}
void EnsurePreemptive()
{
WRAPPER_NO_CONTRACT;
Thread *pThread = GetThread();
if (pThread && pThread->PreemptiveGCDisabled())
{
pThread->EnablePreemptiveGC();
}
}
typedef StateHolder<DoNothing, EnsurePreemptive> EnsurePreemptiveModeIfException;
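// EnsurePreemptiveModeIfException is a scope guard: unless SuppressRelease() is called,
// its destructor runs EnsurePreemptive(), so an exception escaping thread setup leaves the
// thread in preemptive mode rather than cooperative mode. SetupThread() below only calls
// SuppressRelease() on its success paths.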
Thread* SetupThread(BOOL fInternal)
{
CONTRACTL {
THROWS;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
SO_TOLERANT;
}
CONTRACTL_END;
Thread* pThread;
if ((pThread = GetThread()) != NULL)
return pThread;
#ifdef FEATURE_STACK_PROBE
RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), NULL);
#endif //FEATURE_STACK_PROBE
CONTRACT_VIOLATION(SOToleranceViolation);
// For interop debugging, we must mark that we're in a can't-stop region
// because we may take Crsts here that may block the helper thread.
// We're especially fragile here because we don't have a Thread object yet.
CantStopHolder hCantStop;
EnsurePreemptiveModeIfException ensurePreemptive;
#ifdef _DEBUG
CHECK chk;
if (g_pConfig->SuppressChecks())
{
// EnterAssert will suppress any checks
chk.EnterAssert();
}
#endif
// Normally, HasStarted is called from the thread's entrypoint to introduce it to
// the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
// that call into managed code. In that case, a call to SetupThread here must
// find the correct Thread object and install it into TLS.
if (ThreadStore::s_pThreadStore->m_PendingThreadCount != 0)
{
DWORD ourOSThreadId = ::GetCurrentThreadId();
{
ThreadStoreLockHolder TSLockHolder;
_ASSERTE(pThread == NULL);
while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, Thread::TS_Unstarted | Thread::TS_FailStarted, Thread::TS_Unstarted)) != NULL)
{
if (pThread->GetOSThreadId() == ourOSThreadId)
{
break;
}
}
if (pThread != NULL)
{
STRESS_LOG2(LF_SYNC, LL_INFO1000, "T::ST - recycling thread 0x%p (state: 0x%x)\n", pThread, pThread->m_State.Load());
}
}
// It's perfectly reasonable to not find this guy. It's just an unrelated
// thread spinning up.
if (pThread)
{
if (IsThreadPoolWorkerSpecialThread())
{
FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
pThread->SetBackground(TRUE);
}
else if (IsThreadPoolIOCompletionSpecialThread())
{
FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
pThread->SetBackground(TRUE);
}
else if (IsTimerSpecialThread() || IsWaitSpecialThread())
{
FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
pThread->SetBackground(TRUE);
}
BOOL fStatus = pThread->HasStarted();
ensurePreemptive.SuppressRelease();
return fStatus ? pThread : NULL;
}
}
// First time we've seen this thread in the runtime:
pThread = new Thread();
// What state are we in here? COOP???
Holder<Thread*,DoNothing<Thread*>,DeleteThread> threadHolder(pThread);
CExecutionEngine::SetupTLSForThread(pThread);
// A host can deny a thread entry into the runtime by returning a NULL IHostTask,
// but we do want to admit the threads used by the threadpool.
if (IsThreadPoolWorkerSpecialThread() ||
IsThreadPoolIOCompletionSpecialThread() ||
IsTimerSpecialThread() ||
IsWaitSpecialThread())
{
fInternal = TRUE;
}
if (!pThread->InitThread(fInternal) ||
!pThread->PrepareApartmentAndContext())
ThrowOutOfMemory();
// reset any unstarted bits on the thread object
FastInterlockAnd((ULONG *) &pThread->m_State, ~Thread::TS_Unstarted);
FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_LegalToJoin);
ThreadStore::AddThread(pThread);
BOOL fOK = SetThread(pThread);
_ASSERTE (fOK);
fOK = SetAppDomain(pThread->GetDomain());
_ASSERTE (fOK);
#ifdef FEATURE_INTEROP_DEBUGGING
// Ensure that debugger word slot is allocated
UnsafeTlsSetValue(g_debuggerWordTLSIndex, 0);
#endif
// We now have a Thread object visible to the RS. Unmark the special status.
hCantStop.Release();
pThread->SetupThreadForHost();
threadHolder.SuppressRelease();
FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_FullyInitialized);
#ifdef _DEBUG
pThread->AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
#endif
#ifdef DEBUGGING_SUPPORTED
//
// If we're debugging, let the debugger know that this
// thread is up and running now.
//
if (CORDebuggerAttached())
{
g_pDebugInterface->ThreadCreated(pThread);
}
else
{
LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", pThread->GetThreadId()));
}
#endif // DEBUGGING_SUPPORTED
#ifdef PROFILING_SUPPORTED
// If a profiler is present, then notify the profiler that a
// thread has been created.
if (!IsGCSpecialThread())
{
BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
{
GCX_PREEMP();
g_profControlBlock.pProfInterface->ThreadCreated(
(ThreadID)pThread);
}
DWORD osThreadId = ::GetCurrentThreadId();
g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
(ThreadID)pThread, osThreadId);
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
_ASSERTE(!pThread->IsBackground()); // doesn't matter, but worth checking
pThread->SetBackground(TRUE);
ensurePreemptive.SuppressRelease();
if (IsThreadPoolWorkerSpecialThread())
{
FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
}
else if (IsThreadPoolIOCompletionSpecialThread())
{
FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
}
else if (IsTimerSpecialThread() || IsWaitSpecialThread())
{
FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
}
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
if (g_fEnableARM)
{
pThread->QueryThreadProcessorUsage();
}
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
#ifdef FEATURE_EVENT_TRACE
ETW::ThreadLog::FireThreadCreated(pThread);
#endif // FEATURE_EVENT_TRACE
return pThread;
}
//-------------------------------------------------------------------------
void STDMETHODCALLTYPE CorMarkThreadInThreadPool()
{
LIMITED_METHOD_CONTRACT;
BEGIN_ENTRYPOINT_VOIDRET;
END_ENTRYPOINT_VOIDRET;
// This is no longer needed after our switch to
// the Win32 threadpool.
// Keeping it in mscorwks for compat reasons and to keep rotor sscoree and
// mscoree consistent.
}
//-------------------------------------------------------------------------
// Public function: SetupUnstartedThread()
// This sets up a Thread object for an exposed System.Thread that
// has not been started yet. This allows us to properly enumerate all threads
// in the ThreadStore, so we can report on even unstarted threads. Clearly
// there is no physical thread to match, yet.
//
// When there is, complete the setup with code:Thread::HasStarted()
//-------------------------------------------------------------------------
Thread* SetupUnstartedThread(BOOL bRequiresTSL)
{
CONTRACTL {
THROWS;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
Thread* pThread = new Thread();
FastInterlockOr((ULONG *) &pThread->m_State,
(Thread::TS_Unstarted | Thread::TS_WeOwn));
ThreadStore::AddThread(pThread, bRequiresTSL);
return pThread;
}
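// A rough usage sketch (illustrative only -- the real callers and arguments differ):
//
//   Thread *pNew = SetupUnstartedThread();                   // Thread object added to the ThreadStore, TS_Unstarted
//   pNew->CreateNewThread(stackSize, startRoutine, pArgs);   // OS thread is created suspended
//   pNew->StartThread();                                     // ::ResumeThread on the new OS thread
//   // ... the new OS thread then calls Thread::HasStarted() from its entrypoint to finish setup.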
//-------------------------------------------------------------------------
// Public function: DestroyThread()
// Destroys the specified Thread object, for a thread which is about to die.
//-------------------------------------------------------------------------
void DestroyThread(Thread *th)
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
_ASSERTE (th == GetThread());
_ASSERTE(g_fEEShutDown || th->m_dwLockCount == 0 || th->m_fRudeAborted);
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
if (g_fEnableARM)
{
AppDomain* pDomain = th->GetDomain();
pDomain->UpdateProcessorUsage(th->QueryThreadProcessorUsage());
FireEtwThreadTerminated((ULONGLONG)th, (ULONGLONG)pDomain, GetClrInstanceId());
}
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
th->FinishSOWork();
GCX_PREEMP_NO_DTOR();
if (th->IsAbortRequested()) {
// Reset trapping count.
th->UnmarkThreadForAbort(Thread::TAR_ALL);
}
// Clear any outstanding stale EH state that maybe still active on the thread.
#ifdef WIN64EXCEPTIONS
ExceptionTracker::PopTrackers((void*)-1);
#else // !WIN64EXCEPTIONS
#ifdef _TARGET_X86_
PTR_ThreadExceptionState pExState = th->GetExceptionState();
if (pExState->IsExceptionInProgress())
{
GCX_COOP();
pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
}
#else // !_TARGET_X86_
#error Unsupported platform
#endif // _TARGET_X86_
#endif // WIN64EXCEPTIONS
#ifdef FEATURE_PERFTRACING
// Before the thread dies, mark its buffers as no longer owned
// so that they can be cleaned up after the thread dies.
EventPipeBufferList *pBufferList = th->GetEventPipeBufferList();
if(pBufferList != NULL)
{
pBufferList->SetOwnedByThread(false);
}
#endif // FEATURE_PERFTRACING
if (g_fEEShutDown == 0)
{
th->SetThreadState(Thread::TS_ReportDead);
th->OnThreadTerminate(FALSE);
}
}
//-------------------------------------------------------------------------
// Public function: DetachThread()
// Marks the thread as needing to be destroyed, but doesn't destroy it yet.
//-------------------------------------------------------------------------
HRESULT Thread::DetachThread(BOOL fDLLThreadDetach)
{
// !!! Can not use contract here.
// !!! Contract depends on Thread object for GC_TRIGGERS.
// !!! At the end of this function, we call InternalSwitchOut,
// !!! and then GetThread()=NULL, and dtor of contract does not work any more.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
// @todo . We need to probe here, but can't introduce destructors etc.
BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
// Clear any outstanding stale EH state that maybe still active on the thread.
#ifdef WIN64EXCEPTIONS
ExceptionTracker::PopTrackers((void*)-1);
#else // !WIN64EXCEPTIONS
#ifdef _TARGET_X86_
PTR_ThreadExceptionState pExState = GetExceptionState();
if (pExState->IsExceptionInProgress())
{
GCX_COOP();
pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
}
#else // !_TARGET_X86_
#error Unsupported platform
#endif // _TARGET_X86_
#endif // WIN64EXCEPTIONS
#ifdef FEATURE_COMINTEROP
IErrorInfo *pErrorInfo;
// Avoid calling GetErrorInfo() if ole32 has already executed the DLL_THREAD_DETACH,
// otherwise we'll cause ole32 to re-allocate and leak its TLS data (SOleTlsData).
if (ClrTeb::GetOleReservedPtr() != NULL && GetErrorInfo(0, &pErrorInfo) == S_OK)
{
// if this is our IErrorInfo, release it now - we don't want ole32 to do it later as
// part of its DLL_THREAD_DETACH as we won't be able to handle the call at that point
if (!ComInterfaceSlotIs(pErrorInfo, 2, Unknown_ReleaseSpecial_IErrorInfo))
{
// if it's not our IErrorInfo, put it back
SetErrorInfo(0, pErrorInfo);
}
pErrorInfo->Release();
}
// Revoke our IInitializeSpy registration only if we are not in DLL_THREAD_DETACH
// (COM will do it or may have already done it automatically in that case).
if (!fDLLThreadDetach)
{
RevokeApartmentSpy();
}
#endif // FEATURE_COMINTEROP
_ASSERTE(!PreemptiveGCDisabled());
_ASSERTE(g_fEEShutDown || m_dwLockCount == 0 || m_fRudeAborted);
_ASSERTE ((m_State & Thread::TS_Detached) == 0);
_ASSERTE (this == GetThread());
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
if (g_fEnableARM && m_pDomain)
{
m_pDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
FireEtwThreadTerminated((ULONGLONG)this, (ULONGLONG)m_pDomain, GetClrInstanceId());
}
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
FinishSOWork();
FastInterlockIncrement(&Thread::m_DetachCount);
if (IsAbortRequested()) {
// Reset trapping count.
UnmarkThreadForAbort(Thread::TAR_ALL);
}
if (!IsBackground())
{
FastInterlockIncrement(&Thread::m_ActiveDetachCount);
ThreadStore::CheckForEEShutdown();
}
END_CONTRACT_VIOLATION;
InternalSwitchOut();
#ifdef ENABLE_CONTRACTS_DATA
m_pClrDebugState = NULL;
#endif //ENABLE_CONTRACTS_DATA
#ifdef FEATURE_PERFTRACING
// Before the thread dies, mark its buffers as no longer owned
// so that they can be cleaned up after the thread dies.
EventPipeBufferList *pBufferList = m_pEventPipeBufferList.Load();
if(pBufferList != NULL)
{
pBufferList->SetOwnedByThread(false);
}
#endif // FEATURE_PERFTRACING
FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead));
// Do not touch Thread object any more. It may be destroyed.
// These detached threads will be cleaned up by the finalizer thread. But if the process uses
// little managed heap, it may be a while before a GC happens and the finalizer thread starts
// working on detached threads. So we wake up the finalizer thread to clean up resources now.
//
// (It's possible that this is the startup thread, and startup failed, and so the finalization
// machinery isn't fully initialized. Hence this check.)
if (g_fEEStarted)
FinalizerThread::EnableFinalization();
return S_OK;
}
DWORD GetRuntimeId()
{
LIMITED_METHOD_CONTRACT;
return _tls_index;
}
//---------------------------------------------------------------------------
// Creates new Thread for reverse p-invoke calls.
//---------------------------------------------------------------------------
Thread* WINAPI CreateThreadBlockThrow()
{
WRAPPER_NO_CONTRACT;
// This is a workaround to disable our check for throwing an exception in SetupThread.
// We want to throw an exception for reverse p-invoke, and our assertion may fire if
// an unmanaged caller does not set up an exception handler.
CONTRACT_VIOLATION(ThrowsViolation); // WON'T FIX - This enables catastrophic failure exception in reverse P/Invoke - the only way we can communicate an error to legacy code.
Thread* pThread = NULL;
BEGIN_ENTRYPOINT_THROWS;
if (!CanRunManagedCode())
{
// CLR is shutting down - someone's DllMain detach event may be calling back into managed code.
// It is misleading to use our COM+ exception code, since this is not a managed exception.
ULONG_PTR arg = E_PROCESS_SHUTDOWN_REENTRY;
RaiseException(EXCEPTION_EXX, 0, 1, &arg);
}
HRESULT hr = S_OK;
pThread = SetupThreadNoThrow(&hr);
if (pThread == NULL)
{
// Creating Thread failed, and we need to throw an exception to report status.
// It is misleading to use our COM+ exception code, since this is not a managed exception.
ULONG_PTR arg = hr;
RaiseException(EXCEPTION_EXX, 0, 1, &arg);
}
END_ENTRYPOINT_THROWS;
return pThread;
}
#ifdef _DEBUG
DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE;
#endif
extern "C" void STDCALL JIT_PatchedCodeStart();
extern "C" void STDCALL JIT_PatchedCodeLast();
//---------------------------------------------------------------------------
// One-time initialization. Called during Dll initialization. So
// be careful what you do in here!
//---------------------------------------------------------------------------
void InitThreadManager()
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END;
InitializeYieldProcessorNormalizedCrst();
// All patched helpers should fit into one page.
// If you hit this assert on a retail build, there is most likely a problem with the BBT script.
_ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
// I am using VirtualProtect to cover the entire range that this code falls in.
//
// We could reset it to non-writable in between GCs and such, but then we'd have to keep re-writing it back and forth,
// so instead we'll leave it writable from here forward.
DWORD oldProt;
if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart,
PAGE_EXECUTE_READWRITE, &oldProt))
{
_ASSERTE(!"ClrVirtualProtect of code page failed");
COMPlusThrowWin32();
}
#ifndef FEATURE_PAL
_ASSERTE(GetThread() == NULL);
PTEB Teb = NtCurrentTeb();
BYTE** tlsArray = (BYTE**)Teb->ThreadLocalStoragePointer;
BYTE* tlsData = (BYTE*)tlsArray[_tls_index];
size_t offsetOfCurrentThreadInfo = (BYTE*)&gCurrentThreadInfo - tlsData;
_ASSERTE(offsetOfCurrentThreadInfo < 0x8000);
_ASSERTE(_tls_index < 0x10000);
// Save gCurrentThreadInfo location for debugger
g_TlsIndex = (DWORD)(_tls_index + (offsetOfCurrentThreadInfo << 16) + 0x80000000);
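// The packed encoding: bit 31 is a sentinel flag, bits 16-30 hold the offset of gCurrentThreadInfo
// within the module's TLS block (asserted above to fit in 15 bits), and bits 0-15 hold _tls_index.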
_ASSERTE(g_TrapReturningThreads == 0);
#endif // !FEATURE_PAL
#ifdef FEATURE_INTEROP_DEBUGGING
g_debuggerWordTLSIndex = UnsafeTlsAlloc();
if (g_debuggerWordTLSIndex == TLS_OUT_OF_INDEXES)
COMPlusThrowWin32();
#endif
__ClrFlsGetBlock = CExecutionEngine::GetTlsData;
IfFailThrow(Thread::CLRSetThreadStackGuarantee(Thread::STSGuarantee_Force));
ThreadStore::InitThreadStore();
// NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
// If you remove this flag, we will switch to preemptive mode when entering
// g_DeadlockAwareCrst, which means all functions that enter it will become
// GC_TRIGGERS. (This includes all uses of CrstHolder.) So be sure
// to update the contracts if you remove this flag.
g_DeadlockAwareCrst.Init(CrstDeadlockDetection, CRST_UNSAFE_ANYMODE);
#ifdef _DEBUG
// Randomize OBJREF_HASH to handle hash collision.
Thread::OBJREF_HASH = OBJREF_TABSIZE - (DbgGetEXETimeStamp()%10);
#endif // _DEBUG
ThreadSuspend::Initialize();
}
//************************************************************************
// Thread members
//************************************************************************
#if defined(_DEBUG) && defined(TRACK_SYNC)
// One outstanding synchronization held by this thread:
struct Dbg_TrackSyncEntry
{
UINT_PTR m_caller;
AwareLock *m_pAwareLock;
BOOL Equiv (UINT_PTR caller, void *pAwareLock)
{
LIMITED_METHOD_CONTRACT;
return (m_caller == caller) && (m_pAwareLock == pAwareLock);
}
BOOL Equiv (void *pAwareLock)
{
LIMITED_METHOD_CONTRACT;
return (m_pAwareLock == pAwareLock);
}
};
// Each thread has a stack that tracks all enter and leave requests
struct Dbg_TrackSyncStack : public Dbg_TrackSync
{
enum
{
MAX_TRACK_SYNC = 20, // adjust stack depth as necessary
};
void EnterSync (UINT_PTR caller, void *pAwareLock);
void LeaveSync (UINT_PTR caller, void *pAwareLock);
Dbg_TrackSyncEntry m_Stack [MAX_TRACK_SYNC];
UINT_PTR m_StackPointer;
BOOL m_Active;
Dbg_TrackSyncStack() : m_StackPointer(0),
m_Active(TRUE)
{
LIMITED_METHOD_CONTRACT;
}
};
// ensure that registers are preserved across this call
#ifdef _MSC_VER
#pragma optimize("", off)
#endif
// A pain to do all this from ASM, but watch out for trashed registers
EXTERN_C void EnterSyncHelper (UINT_PTR caller, void *pAwareLock)
{
BEGIN_ENTRYPOINT_THROWS;
WRAPPER_NO_CONTRACT;
GetThread()->m_pTrackSync->EnterSync(caller, pAwareLock);
END_ENTRYPOINT_THROWS;
}
EXTERN_C void LeaveSyncHelper (UINT_PTR caller, void *pAwareLock)
{
BEGIN_ENTRYPOINT_THROWS;
WRAPPER_NO_CONTRACT;
GetThread()->m_pTrackSync->LeaveSync(caller, pAwareLock);
END_ENTRYPOINT_THROWS;
}
#ifdef _MSC_VER
#pragma optimize("", on)
#endif
void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock)
{
LIMITED_METHOD_CONTRACT;
STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
caller,
((AwareLock*)pAwareLock)->GetRecursionLevel(),
((AwareLock*)pAwareLock)->GetLockState(),
((AwareLock*)pAwareLock)->GetHoldingThread());
if (m_Active)
{
if (m_StackPointer >= MAX_TRACK_SYNC)
{
_ASSERTE(!"Overflowed synchronization stack checking. Disabling");
m_Active = FALSE;
return;
}
}
m_Stack[m_StackPointer].m_caller = caller;
m_Stack[m_StackPointer].m_pAwareLock = (AwareLock *) pAwareLock;
m_StackPointer++;
}
void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock)
{
WRAPPER_NO_CONTRACT;
STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
caller,
((AwareLock*)pAwareLock)->GetRecursionLevel(),
((AwareLock*)pAwareLock)->GetLockState(),
((AwareLock*)pAwareLock)->GetHoldingThread());
if (m_Active)
{
if (m_StackPointer == 0)
_ASSERTE(!"Underflow in leaving synchronization");
else
if (m_Stack[m_StackPointer - 1].Equiv(pAwareLock))
{
m_StackPointer--;
}
else
{
for (int i=m_StackPointer - 2; i>=0; i--)
{
if (m_Stack[i].Equiv(pAwareLock))
{
_ASSERTE(!"Locks are released out of order. This might be okay...");
memcpy(&m_Stack[i], &m_Stack[i+1],
sizeof(m_Stack[0]) * (m_StackPointer - i - 1));
return;
}
}
_ASSERTE(!"Trying to release a synchronization lock which isn't held");
}
}
}
#endif // TRACK_SYNC
static DWORD dwHashCodeSeed = 123456789;
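// Seed for per-thread hash codes; the Thread constructor below advances it with a linear
// congruential generator so each new Thread gets a different m_dwHashCodeSeed.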
#ifdef _DEBUG
void CheckADValidity(AppDomain* pDomain, DWORD ADValidityKind)
{
CONTRACTL
{
NOTHROW;
FORBID_FAULT;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
//
// Note: this apparently checks whether any one of the supplied conditions is satisfied, rather
// than checking that *all* of them are satisfied. One would have expected it to assert all of the
// conditions, but it does not.
//
CONTRACT_VIOLATION(FaultViolation);
if (::GetAppDomain()==pDomain)
return;
if ((ADValidityKind & ADV_DEFAULTAD) &&
pDomain->IsDefaultDomain())
return;
if ((ADValidityKind & ADV_ITERATOR) &&
pDomain->IsHeldByIterator())
return;
if ((ADValidityKind & ADV_CREATING) &&
pDomain->IsBeingCreated())
return;
if ((ADValidityKind & ADV_COMPILATION) &&
pDomain->IsCompilationDomain())
return;
if ((ADValidityKind & ADV_FINALIZER) &&
IsFinalizerThread())
return;
if ((ADValidityKind & ADV_ADUTHREAD) &&
IsADUnloadHelperThread())
return;
if ((ADValidityKind & ADV_RUNNINGIN) &&
pDomain->IsRunningIn(GetThread()))
return;
if ((ADValidityKind & ADV_REFTAKER) &&
pDomain->IsHeldByRefTaker())
return;
_ASSERTE(!"Appdomain* can be invalid");
}
#endif
//--------------------------------------------------------------------
// Thread construction
//--------------------------------------------------------------------
Thread::Thread()
{
CONTRACTL {
THROWS;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
m_pFrame = FRAME_TOP;
m_pUnloadBoundaryFrame = NULL;
m_fPreemptiveGCDisabled = 0;
#ifdef _DEBUG
m_ulForbidTypeLoad = 0;
m_GCOnTransitionsOK = TRUE;
#endif
#ifdef ENABLE_CONTRACTS
m_pClrDebugState = NULL;
m_ulEnablePreemptiveGCCount = 0;
#endif
// Initialize data members related to thread statics
m_pTLBTable = NULL;
m_TLBTableSize = 0;
m_pThreadLocalBlock = NULL;
m_dwLockCount = 0;
m_dwBeginLockCount = 0;
#ifdef _DEBUG
dbg_m_cSuspendedThreads = 0;
dbg_m_cSuspendedThreadsWithoutOSLock = 0;
m_Creater.Clear();
m_dwUnbreakableLockCount = 0;
#endif
m_dwForbidSuspendThread = 0;
// Initialize lock state
m_pHead = &m_embeddedEntry;
m_embeddedEntry.pNext = m_pHead;
m_embeddedEntry.pPrev = m_pHead;
m_embeddedEntry.dwLLockID = 0;
m_embeddedEntry.dwULockID = 0;
m_embeddedEntry.wReaderLevel = 0;
m_pBlockingLock = NULL;
m_alloc_context.init();
m_thAllocContextObj = 0;
m_UserInterrupt = 0;
m_WaitEventLink.m_Next = NULL;
m_WaitEventLink.m_LinkSB.m_pNext = NULL;
m_ThreadHandle = INVALID_HANDLE_VALUE;
m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
m_ThreadHandleForResume = INVALID_HANDLE_VALUE;
m_WeOwnThreadHandle = FALSE;
#ifdef _DEBUG
m_ThreadId = UNINITIALIZED_THREADID;
#endif //_DEBUG
// Initialize this variable to a very different start value for each thread
// Using linear congruential generator from Knuth Vol. 2, p. 102, line 24
dwHashCodeSeed = dwHashCodeSeed * 1566083941 + 1;
m_dwHashCodeSeed = dwHashCodeSeed;
m_hijackLock = FALSE;
m_OSThreadId = 0;
m_Priority = INVALID_THREAD_PRIORITY;
m_ExternalRefCount = 1;
m_UnmanagedRefCount = 0;
m_State = TS_Unstarted;
m_StateNC = TSNC_Unknown;
// It can't be a LongWeakHandle because we zero stuff out of the exposed
// object as it is finalized. At that point, calls to GetCurrentThread()
// had better get a new one!
m_ExposedObject = CreateGlobalShortWeakHandle(NULL);
GlobalShortWeakHandleHolder exposedObjectHolder(m_ExposedObject);
m_StrongHndToExposedObject = CreateGlobalStrongHandle(NULL);
GlobalStrongHandleHolder strongHndToExposedObjectHolder(m_StrongHndToExposedObject);
m_LastThrownObjectHandle = NULL;
m_ltoIsUnhandled = FALSE;
m_AbortReason = NULL;
m_debuggerFilterContext = NULL;
m_debuggerCantStop = 0;
m_fInteropDebuggingHijacked = FALSE;
m_profilerCallbackState = 0;
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
m_dwProfilerEvacuationCounter = 0;
#endif // FEATURE_PROFAPI_ATTACH_DETACH
m_pProfilerFilterContext = NULL;
m_CacheStackBase = 0;
m_CacheStackLimit = 0;
m_CacheStackSufficientExecutionLimit = 0;
m_LastAllowableStackAddress= 0;
m_ProbeLimit = 0;
#ifdef _DEBUG
m_pCleanedStackBase = NULL;
#endif
#ifdef STACK_GUARDS_DEBUG
m_pCurrentStackGuard = NULL;
#endif
#ifdef FEATURE_HIJACK
m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC;
m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC;
#ifndef PLATFORM_UNIX
X86_ONLY(m_LastRedirectIP = 0);
X86_ONLY(m_SpinCount = 0);
#endif // PLATFORM_UNIX
#endif // FEATURE_HIJACK
#if defined(_DEBUG) && defined(TRACK_SYNC)
m_pTrackSync = new Dbg_TrackSyncStack;
NewHolder<Dbg_TrackSyncStack> trackSyncHolder(static_cast<Dbg_TrackSyncStack*>(m_pTrackSync));
#endif // TRACK_SYNC
m_RequestedStackSize = 0;
m_PreventAsync = 0;
m_PreventAbort = 0;
m_nNestedMarshalingExceptions = 0;
m_pDomain = NULL;
#ifdef FEATURE_COMINTEROP
m_fDisableComObjectEagerCleanup = false;
#endif //FEATURE_COMINTEROP
m_fHasDeadThreadBeenConsideredForGCTrigger = false;
m_Context = NULL;
m_TraceCallCount = 0;
m_ThrewControlForThread = 0;
m_OSContext = NULL;
m_ThreadTasks = (ThreadTasks)0;
m_pLoadLimiter= NULL;
m_pLoadingFile = NULL;
// The state and the tasks must be 32-bit aligned for atomicity to be guaranteed.
_ASSERTE((((size_t) &m_State) & 3) == 0);
_ASSERTE((((size_t) &m_ThreadTasks) & 3) == 0);
// Track perf counter for the logical thread object.
COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical++);
// On all callbacks, call the trap code, which we now have
// wired to cause a GC. Thus we will do a GC on all Transition Frame Transitions (and more).
if (GCStress<cfg_transition>::IsEnabled())
{
m_State = (ThreadState) (m_State | TS_GCOnTransitions);
}
//m_pSharedStaticData = NULL;
//m_pUnsharedStaticData = NULL;
//m_pStaticDataHash = NULL;
//m_pSDHCrst = NULL;
m_fSecurityStackwalk = FALSE;
m_AbortType = EEPolicy::TA_None;
m_AbortInfo = 0;
m_AbortEndTime = MAXULONGLONG;
m_RudeAbortEndTime = MAXULONGLONG;
m_AbortController = 0;
m_AbortRequestLock = 0;
m_fRudeAbortInitiated = FALSE;
m_pIOCompletionContext = NULL;
#ifdef _DEBUG
m_fRudeAborted = FALSE;
m_dwAbortPoint = 0;
#endif
m_pFiberData = NULL;
m_TaskId = INVALID_TASK_ID;
m_dwConnectionId = INVALID_CONNECTION_ID;
#ifdef _DEBUG
DWORD_PTR *ttInfo = NULL;
size_t nBytes = MaxThreadRecord *
(sizeof(FiberSwitchInfo)-sizeof(size_t)+MaxStackDepth*sizeof(size_t));
if (g_pConfig->SaveThreadInfo()) {
ttInfo = new DWORD_PTR[(nBytes/sizeof(DWORD_PTR))*ThreadTrackInfo_Max];
memset(ttInfo,0,nBytes*ThreadTrackInfo_Max);
}
for (DWORD i = 0; i < ThreadTrackInfo_Max; i ++)
{
m_FiberInfoIndex[i] = 0;
m_pFiberInfo[i] = (FiberSwitchInfo*)((DWORD_PTR)ttInfo + i*nBytes);
}
NewArrayHolder<DWORD_PTR> fiberInfoHolder(ttInfo);
#endif
m_OSContext = new CONTEXT();
NewHolder<CONTEXT> contextHolder(m_OSContext);
m_pSavedRedirectContext = NULL;
NewHolder<CONTEXT> savedRedirectContextHolder(m_pSavedRedirectContext);
#ifdef FEATURE_COMINTEROP
m_pRCWStack = new RCWStackHeader();
#endif
m_pCerPreparationState = NULL;
#ifdef _DEBUG
m_bGCStressing = FALSE;
m_bUniqueStacking = FALSE;
#endif
m_pPendingTypeLoad = NULL;
#ifdef FEATURE_PREJIT
m_pIBCInfo = NULL;
#endif
m_dwAVInRuntimeImplOkayCount = 0;
#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(PLATFORM_UNIX) // GCCOVER
m_fPreemptiveGCDisabledForGCStress = false;
#endif
#ifdef _DEBUG
m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1;
#endif
m_dwHostTaskRefCount = 0;
m_pExceptionDuringStartup = NULL;
#ifdef HAVE_GCCOVER
m_pbDestCode = NULL;
m_pbSrcCode = NULL;
#if defined(GCCOVER_TOLERATE_SPURIOUS_AV)
m_pLastAVAddress = NULL;
#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)
#endif // HAVE_GCCOVER
m_fCompletionPortDrained = FALSE;
m_debuggerActivePatchSkipper = NULL;
m_dwThreadHandleBeingUsed = 0;
SetProfilerCallbacksAllowed(TRUE);
m_pCreatingThrowableForException = NULL;
#ifdef _DEBUG
m_dwDisableAbortCheckCount = 0;
#endif // _DEBUG
#ifdef WIN64EXCEPTIONS
m_dwIndexClauseForCatch = 0;
m_sfEstablisherOfActualHandlerFrame.Clear();
#endif // WIN64EXCEPTIONS
m_threadPoolCompletionCount = 0;
Thread *pThread = GetThread();
_ASSERTE(SystemDomain::System()->DefaultDomain()->GetDefaultContext());
InitContext();
_ASSERTE(m_Context);
if (pThread)
{
_ASSERTE(pThread->GetDomain() && pThread->GetDomain()->GetDefaultContext());
// Start off the new thread in the default context of
// the creating thread's appDomain. This could be changed by SetDelegate
SetKickOffDomainId(pThread->GetDomain()->GetId());
} else
SetKickOffDomainId((ADID)DefaultADID);
// Do not expose thread until it is fully constructed
g_pThinLockThreadIdDispenser->NewId(this, this->m_ThreadId);
//
// DO NOT ADD ADDITIONAL CONSTRUCTION AFTER THIS POINT.
// NewId() allows this Thread instance to be accessed via a Thread Id. Do not
// add additional construction after this point to prevent the race condition
// of accessing a partially constructed Thread via Thread Id lookup.
//
exposedObjectHolder.SuppressRelease();
strongHndToExposedObjectHolder.SuppressRelease();
#if defined(_DEBUG) && defined(TRACK_SYNC)
trackSyncHolder.SuppressRelease();
#endif
#ifdef _DEBUG
fiberInfoHolder.SuppressRelease();
#endif
contextHolder.SuppressRelease();
savedRedirectContextHolder.SuppressRelease();
managedThreadCurrentCulture = NULL;
managedThreadCurrentUICulture = NULL;
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
m_ullProcessorUsageBaseline = 0;
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
#ifdef FEATURE_COMINTEROP
m_uliInitializeSpyCookie.QuadPart = 0ul;
m_fInitializeSpyRegistered = false;
m_pLastSTACtxCookie = NULL;
#endif // FEATURE_COMINTEROP
m_fGCSpecial = FALSE;
m_wCPUGroup = 0;
m_pAffinityMask = 0;
m_pAllLoggedTypes = NULL;
#ifdef FEATURE_PERFTRACING
m_pEventPipeBufferList = NULL;
m_eventWriteInProgress = false;
memset(&m_activityId, 0, sizeof(m_activityId));
#endif // FEATURE_PERFTRACING
m_HijackReturnKind = RT_Illegal;
}
//--------------------------------------------------------------------
// Failable initialization occurs here.
//--------------------------------------------------------------------
BOOL Thread::InitThread(BOOL fInternal)
{
CONTRACTL {
THROWS;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
HANDLE hDup = INVALID_HANDLE_VALUE;
BOOL ret = TRUE;
// This message actually serves a purpose (which is why it is always run).
// The stress log is run during hijacking, when other threads can be suspended
// at arbitrary locations (including while holding a lock that NT uses to serialize
// all memory allocations). By sending a message now, we ensure that the stress
// log will not allocate memory at these critical times and thus avoid deadlock.
STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread managed Thread %p Thread Id = %x\n", this, GetThreadId());
if ((m_State & TS_WeOwn) == 0)
{
COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads++);
}
else
{
COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical++);
}
#ifndef FEATURE_PAL
// workaround: Remove this when we flow impersonation token to host.
BOOL reverted = FALSE;
HANDLE threadToken = INVALID_HANDLE_VALUE;
#endif // !FEATURE_PAL
if (m_ThreadHandle == INVALID_HANDLE_VALUE)
{
// For WinCE, all clients have the same handle for a thread. Duplication is
// not possible. We make sure we never close this handle unless we created
// the thread (TS_WeOwn).
//
// For Win32, each client has its own handle. This is achieved by duplicating
// the pseudo-handle from ::GetCurrentThread(). Unlike WinCE, this service
// returns a pseudo-handle which is only useful for duplication. In this case
// each client is responsible for closing its own (duplicated) handle.
//
// We don't bother duplicating if WeOwn, because we created the handle in the
// first place.
// Thread is created when or after the physical thread started running
HANDLE curProcess = ::GetCurrentProcess();
#ifndef FEATURE_PAL
// If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only
// THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include
// THREAD_SUSPEND_RESUME nor THREAD_GET_CONTEXT. We need to be able to suspend the thread, and we need to be
// able to get its context. Therefore, if we're impersonating, we revert to self, dup the handle, then
// re-impersonate before we leave this routine.
if (!RevertIfImpersonated(&reverted, &threadToken))
{
COMPlusThrowWin32();
}
class EnsureResetThreadToken
{
private:
BOOL m_NeedReset;
HANDLE m_threadToken;
public:
EnsureResetThreadToken(HANDLE threadToken, BOOL reverted)
{
m_threadToken = threadToken;
m_NeedReset = reverted;
}
~EnsureResetThreadToken()
{
UndoRevert(m_NeedReset, m_threadToken);
if (m_threadToken != INVALID_HANDLE_VALUE)
{
CloseHandle(m_threadToken);
}
}
};
EnsureResetThreadToken resetToken(threadToken, reverted);
#endif // !FEATURE_PAL
if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup,
0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS))
{
_ASSERTE(hDup != INVALID_HANDLE_VALUE);
SetThreadHandle(hDup);
m_WeOwnThreadHandle = TRUE;
}
else
{
COMPlusThrowWin32();
}
}
if ((m_State & TS_WeOwn) == 0)
{
if (!AllocHandles())
{
ThrowOutOfMemory();
}
}
_ASSERTE(HasValidThreadHandle());
m_random.Init();
// Set floating point mode to round to nearest
#ifndef FEATURE_PAL
(void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR );
m_pTEB = (struct _NT_TIB*)NtCurrentTeb();
#endif // !FEATURE_PAL
if (m_CacheStackBase == 0)
{
_ASSERTE(m_CacheStackLimit == 0);
_ASSERTE(m_LastAllowableStackAddress == 0);
_ASSERTE(m_ProbeLimit == 0);
ret = SetStackLimits(fAll);
if (ret == FALSE)
{
ThrowOutOfMemory();
}
// We commit the thread's entire stack when it enters the runtime to allow us to be reliable in low memory
// situations. See the comments in front of Thread::CommitThreadStack() for more information.
ret = Thread::CommitThreadStack(this);
if (ret == FALSE)
{
ThrowOutOfMemory();
}
}
ret = Thread::AllocateIOCompletionContext();
if (!ret)
{
ThrowOutOfMemory();
}
_ASSERTE(ret); // every failure case for ret should throw.
return ret;
}
// Allocate all the handles. When we are kicking off a new thread, we can call
// here before the thread starts running.
BOOL Thread::AllocHandles()
{
WRAPPER_NO_CONTRACT;
_ASSERTE(!m_DebugSuspendEvent.IsValid());
_ASSERTE(!m_EventWait.IsValid());
BOOL fOK = TRUE;
EX_TRY {
// create a manual reset event for getting the thread to a safe point
m_DebugSuspendEvent.CreateManualEvent(FALSE);
m_EventWait.CreateManualEvent(TRUE);
}
EX_CATCH {
fOK = FALSE;
if (!m_DebugSuspendEvent.IsValid()) {
m_DebugSuspendEvent.CloseEvent();
}
if (!m_EventWait.IsValid()) {
m_EventWait.CloseEvent();
}
}
EX_END_CATCH(RethrowTerminalExceptions);
return fOK;
}
//--------------------------------------------------------------------
// This is the alternate path to SetupThread/InitThread. If we created
// an unstarted thread, we have SetupUnstartedThread/HasStarted.
//--------------------------------------------------------------------
BOOL Thread::HasStarted(BOOL bRequiresTSL)
{
CONTRACTL {
NOTHROW;
DISABLED(GC_NOTRIGGER);
SO_TOLERANT;
}
CONTRACTL_END;
// @todo need a probe that tolerates not having a thread setup at all
CONTRACT_VIOLATION(SOToleranceViolation);
_ASSERTE(!m_fPreemptiveGCDisabled); // can't use PreemptiveGCDisabled() here
// This is cheating a little. There is a pathway here from SetupThread, but only
// via IJW SystemDomain::RunDllMain. Normally SetupThread returns a thread in
// preemptive mode, ready for a transition. But in the IJW case, it can return a
// cooperative mode thread. RunDllMain handles this "surprise" correctly.
m_fPreemptiveGCDisabled = TRUE;
// Normally, HasStarted is called from the thread's entrypoint to introduce it to
// the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
// that call into managed code. In that case, the second HasStarted call is
// redundant and should be ignored.
if (GetThread() == this)
return TRUE;
_ASSERTE(GetThread() == 0);
_ASSERTE(HasValidThreadHandle());
BOOL fKeepTLS = FALSE;
BOOL fCanCleanupCOMState = FALSE;
BOOL res = TRUE;
res = SetStackLimits(fAll);
if (res == FALSE)
{
m_pExceptionDuringStartup = Exception::GetOOMException();
goto FAILURE;
}
// We commit the thread's entire stack when it enters the runtime to allow us to be reliable in low memory
// situations. See the comments in front of Thread::CommitThreadStack() for more information.
res = Thread::CommitThreadStack(this);
if (res == FALSE)
{
m_pExceptionDuringStartup = Exception::GetOOMException();
goto FAILURE;
}
// If any exception happens during HasStarted, we will cache the exception in Thread::m_pExceptionDuringStartup
// which will be thrown in Thread.Start as an internal exception
EX_TRY
{
//
// Initialization must happen in the following order - hosts like SQL Server depend on this.
//
CExecutionEngine::SetupTLSForThread(this);
fCanCleanupCOMState = TRUE;
res = PrepareApartmentAndContext();
if (!res)
{
ThrowOutOfMemory();
}
InitThread(FALSE);
if (SetThread(this) == FALSE)
{
ThrowOutOfMemory();
}
if (SetAppDomain(m_pDomain) == FALSE)
{
ThrowOutOfMemory();
}
#ifdef _DEBUG
AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
#endif
SetupThreadForHost();
ThreadStore::TransferStartedThread(this, bRequiresTSL);
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
if (g_fEnableARM)
{
QueryThreadProcessorUsage();
}
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
#ifdef FEATURE_EVENT_TRACE
ETW::ThreadLog::FireThreadCreated(this);
#endif // FEATURE_EVENT_TRACE
}
EX_CATCH
{
if (__pException != NULL)
{
__pException.SuppressRelease();
m_pExceptionDuringStartup = __pException;
}
res = FALSE;
}
EX_END_CATCH(SwallowAllExceptions);
FAILURE:
if (res == FALSE)
{
if (m_fPreemptiveGCDisabled)
{
m_fPreemptiveGCDisabled = FALSE;
}
_ASSERTE (HasThreadState(TS_Unstarted));
SetThreadState(TS_FailStarted);
if (GetThread() != NULL && IsAbortRequested())
UnmarkThreadForAbort(TAR_ALL);
if (!fKeepTLS)
{
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
//
// Undo our call to PrepareApartmentAndContext above, so we don't leak a CoInitialize
// If we're keeping TLS, then the host's call to ExitTask will clean this up instead.
//
if (fCanCleanupCOMState)
{
// The thread pointer in TLS may not be set yet, if we had a failure before we set it.
// So we'll set it up here (we'll unset it a few lines down).
if (SetThread(this) != FALSE)
{
CleanupCOMState();
}
}
#endif
FastInterlockDecrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
// One of the components of OtherThreadsComplete() has changed, so check whether
// we should now exit the EE.
ThreadStore::CheckForEEShutdown();
DecExternalCount(/*holdingLock*/ !bRequiresTSL);
SetThread(NULL);
SetAppDomain(NULL);
}
}
else
{
FastInterlockOr((ULONG *) &m_State, TS_FullyInitialized);
#ifdef DEBUGGING_SUPPORTED
//
// If we're debugging, let the debugger know that this
// thread is up and running now.
//
if (CORDebuggerAttached())
{
g_pDebugInterface->ThreadCreated(this);
}
else
{
LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", GetThreadId()));
}
#endif // DEBUGGING_SUPPORTED
#ifdef PROFILING_SUPPORTED
// If a profiler is running, let them know about the new thread.
//
// The call to IsGCSpecial is crucial to avoid a deadlock. See code:Thread::m_fGCSpecial for more
// information
if (!IsGCSpecial())
{
BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
BOOL gcOnTransition = GC_ON_TRANSITIONS(FALSE); // disable GCStress 2 to avoid the profiler receiving a RuntimeThreadSuspended notification even before the ThreadCreated notification
{
GCX_PREEMP();
g_profControlBlock.pProfInterface->ThreadCreated((ThreadID) this);
}
GC_ON_TRANSITIONS(gcOnTransition);
DWORD osThreadId = ::GetCurrentThreadId();
g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
(ThreadID) this, osThreadId);
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
// CoreCLR does not support user-requested thread suspension
_ASSERTE(!(m_State & TS_SuspendUnstarted));
}
return res;
}
BOOL Thread::AllocateIOCompletionContext()
{
WRAPPER_NO_CONTRACT;
PIOCompletionContext pIOC = new (nothrow) IOCompletionContext;
if(pIOC != NULL)
{
pIOC->lpOverlapped = NULL;
m_pIOCompletionContext = pIOC;
return TRUE;
}
else
{
return FALSE;
}
}
VOID Thread::FreeIOCompletionContext()
{
WRAPPER_NO_CONTRACT;
if (m_pIOCompletionContext != NULL)
{
PIOCompletionContext pIOC = (PIOCompletionContext) m_pIOCompletionContext;
delete pIOC;
m_pIOCompletionContext = NULL;
}
}
void Thread::HandleThreadStartupFailure()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
_ASSERTE(GetThread() != NULL);
struct ProtectArgs
{
OBJECTREF pThrowable;
OBJECTREF pReason;
} args;
memset(&args, 0, sizeof(ProtectArgs));
GCPROTECT_BEGIN(args);
MethodTable *pMT = MscorlibBinder::GetException(kThreadStartException);
args.pThrowable = AllocateObject(pMT);
MethodDescCallSite exceptionCtor(METHOD__THREAD_START_EXCEPTION__EX_CTOR);
if (m_pExceptionDuringStartup)
{
args.pReason = CLRException::GetThrowableFromException(m_pExceptionDuringStartup);
Exception::Delete(m_pExceptionDuringStartup);
m_pExceptionDuringStartup = NULL;
}
ARG_SLOT args1[] = {
ObjToArgSlot(args.pThrowable),
ObjToArgSlot(args.pReason),
};
exceptionCtor.Call(args1);
GCPROTECT_END(); //Prot
RaiseTheExceptionInternalOnly(args.pThrowable, FALSE);
}
#ifndef FEATURE_PAL
BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken)
{
WRAPPER_NO_CONTRACT;
BOOL bImpersonated = OpenThreadToken(GetCurrentThread(), // we are assuming that if this call fails,
TOKEN_IMPERSONATE, // we are not impersonating. There is no win32
TRUE, // api to figure this out. The only alternative
phToken); // is to use NtCurrentTeb->IsImpersonating().
if (bImpersonated)
{
*bReverted = RevertToSelf();
return *bReverted;
}
return TRUE;
}
void UndoRevert(BOOL bReverted, HANDLE hToken)
{
if (bReverted)
{
if (!SetThreadToken(NULL, hToken))
{
_ASSERTE(!"Undo Revert -> SetThreadToken failed");
STRESS_LOG1(LF_EH, LL_INFO100, "UndoRevert/SetThreadToken failed for hToken = %d\n",hToken);
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
}
}
return;
}
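// A hedged sketch of how these two helpers pair up (mirroring Thread::CreateNewThread below;
// the names here are illustrative):
//
//   HandleHolder token;
//   BOOL bReverted = FALSE;
//   if (RevertIfImpersonated(&bReverted, &token))
//   {
//       // ... work that must run with the process identity, e.g. DuplicateHandle ...
//       UndoRevert(bReverted, token);
//   }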
#endif // !FEATURE_PAL
// We don't want ::CreateThread() calls scattered throughout the source. So gather
// them all here.
BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName)
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
BOOL bRet;
// This assert is here to prevent a bug in the future:
// CreateTask currently takes a DWORD and we will downcast;
// if that interface changes to take a SIZE_T, this assert needs to be removed.
//
_ASSERTE(stackSize <= 0xFFFFFFFF);
#ifndef FEATURE_PAL
HandleHolder token;
BOOL bReverted = FALSE;
bRet = RevertIfImpersonated(&bReverted, &token);
if (bRet != TRUE)
return bRet;
#endif // !FEATURE_PAL
m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread);
bRet = CreateNewOSThread(stackSize, start, args);
#ifndef FEATURE_PAL
UndoRevert(bReverted, token);
if (pName != NULL)
SetThreadName(m_ThreadHandle, pName);
#endif // !FEATURE_PAL
return bRet;
}
// This is to avoid the 64KB/1MB aliasing problem present on Pentium 4 processors,
// which can significantly impact performance with HyperThreading enabled
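// Each new thread alloca's a different, rotating offset before jumping to the real start routine,
// so thread stacks do not all begin at addresses that collide in the same cache sets.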
DWORD WINAPI Thread::intermediateThreadProc(PVOID arg)
{
WRAPPER_NO_CONTRACT;
m_offset_counter++;
if (m_offset_counter * offset_multiplier > (int) GetOsPageSize())
m_offset_counter = 0;
(void)_alloca(m_offset_counter * offset_multiplier);
intermediateThreadParam* param = (intermediateThreadParam*)arg;
LPTHREAD_START_ROUTINE ThreadFcnPtr = param->lpThreadFunction;
PVOID args = param->lpArg;
delete param;
return ThreadFcnPtr(args);
}
HANDLE Thread::CreateUtilityThread(Thread::StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName, DWORD flags, DWORD* pThreadId)
{
LIMITED_METHOD_CONTRACT;
// TODO: we should always use small stacks for most of these threads. For CLR 4, we're being conservative
// here because this is a last-minute fix.
SIZE_T stackSize;
switch (stackSizeBucket)
{
case StackSize_Small:
stackSize = 256 * 1024;
break;
case StackSize_Medium:
stackSize = 512 * 1024;
break;
default:
_ASSERTE(!"Bad stack size bucket");
case StackSize_Large:
stackSize = 1024 * 1024;
break;
}
flags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
DWORD threadId;
HANDLE hThread = CreateThread(NULL, stackSize, start, args, flags, &threadId);
#ifndef FEATURE_PAL
SetThreadName(hThread, pName);
#endif // !FEATURE_PAL
if (pThreadId)
*pThreadId = threadId;
return hThread;
}
BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
//
// Let's get the stack sizes from the PE file that started process.
//
static SIZE_T ExeSizeOfStackReserve = 0;
static SIZE_T ExeSizeOfStackCommit = 0;
static BOOL fSizesGot = FALSE;
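// The sizes are read once from the EXE's PE header and cached in the statics above.
// If they can't be obtained (e.g. on FEATURE_PAL builds, or if the PE decode fails),
// we fall back to 256 KB defaults and return FALSE.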
#ifndef FEATURE_PAL
if (!fSizesGot)
{
HINSTANCE hInst = WszGetModuleHandle(NULL);
_ASSERTE(hInst); // WszGetModuleHandle should never fail on the module that started the process.
EX_TRY
{
PEDecoder pe(hInst);
pe.GetEXEStackSizes(&ExeSizeOfStackReserve, &ExeSizeOfStackCommit);
fSizesGot = TRUE;
}
EX_CATCH
{
fSizesGot = FALSE;
}
EX_END_CATCH(SwallowAllExceptions);
}
#endif // !FEATURE_PAL
if (!fSizesGot) {
// return some somewhat-reasonable numbers
if (NULL != reserveSize) *reserveSize = 256*1024;
if (NULL != commitSize) *commitSize = 256*1024;
return FALSE;
}
if (NULL != reserveSize) *reserveSize = ExeSizeOfStackReserve;
if (NULL != commitSize) *commitSize = ExeSizeOfStackCommit;
return TRUE;
}
BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUTINE start, void *args)
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
DWORD ourId = 0;
HANDLE h = NULL;
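// Create the thread suspended so we can record its handle and OS thread id (and bump
// the pending-thread count) before it starts running; the stack-size parameter is
// treated as a reservation.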
DWORD dwCreationFlags = CREATE_SUSPENDED;
dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
#ifndef FEATURE_PAL // the PAL does its own adjustments as necessary
if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize())
{
// On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of
// a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB).
sizeToCommitOrReserve = GetOsPageSize() + 1;
}
#endif // !FEATURE_PAL
intermediateThreadParam* lpThreadArgs = new (nothrow) intermediateThreadParam;
if (lpThreadArgs == NULL)
{
return FALSE;
}
NewHolder<intermediateThreadParam> argHolder(lpThreadArgs);
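// argHolder frees the parameter block if anything below fails; once ::CreateThread
// succeeds we SuppressRelease it and ownership passes to intermediateThreadProc,
// which deletes it.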
// Make sure we have all our handles, in case someone tries to suspend us
// as we are starting up.
if (!AllocHandles())
{
// OS is out of handles/memory?
return FALSE;
}
lpThreadArgs->lpThreadFunction = start;
lpThreadArgs->lpArg = args;
h = ::CreateThread(NULL /*=SECURITY_ATTRIBUTES*/,
sizeToCommitOrReserve,
intermediateThreadProc,
lpThreadArgs,
dwCreationFlags,
&ourId);
if (h == NULL)
return FALSE;
argHolder.SuppressRelease();
_ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted.
SetThreadHandle(h);
m_WeOwnThreadHandle = TRUE;
// Before we do the resume, we need to take note of the new ThreadId. This
// is necessary because -- before the thread starts executing at KickOffThread --
// it may perform some DllMain DLL_THREAD_ATTACH notifications. These could
// call into managed code. During the consequent SetupThread, we need to
// perform the Thread::HasStarted call instead of going through the normal
// 'new thread' pathway.
_ASSERTE(GetOSThreadId() == 0);
_ASSERTE(ourId != 0);
m_OSThreadId = ourId;
FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
#ifdef _DEBUG
m_Creater.SetToCurrentThread();
#endif
return TRUE;
}
//
// #threadDestruction
//
// General comments on thread destruction.
//
// The C++ Thread object can survive beyond the time when the Win32 thread has died.
// This is important if an exposed object has been created for this thread. The
// exposed object will survive until it is GC'ed.
//
// A client like an exposed object can place an external reference count on that
// object. We also place a reference count on it when we construct it, and we lose
// that count when the thread finishes doing useful work (OnThreadTerminate).
//
// One way OnThreadTerminate() is called is when the thread finishes doing useful
// work. This case always happens on the correct thread.
//
// The other way OnThreadTerminate() is called is during product shutdown. We do
// a "best effort" to eliminate all threads except the Main thread before shutdown
// happens. But there may be some background threads or external threads still
// running.
//
// When the final reference count disappears, we destruct. Until then, the thread
// remains in the ThreadStore, but is marked as "Dead".
//<TODO>
// @TODO cwb: for a typical shutdown, only background threads are still around.
// Should we interrupt them? What about the non-typical shutdown?</TODO>
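// In other words, the external reference count works roughly like this:
// construction takes one reference; an exposed (managed) Thread object takes
// another via IncExternalCount; OnThreadTerminate releases the construction
// reference when the thread stops doing useful work; and when DecExternalCount
// drops the count to zero, the C++ Thread object is deleted.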
int Thread::IncExternalCount()
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
Thread *pCurThread = GetThread();
_ASSERTE(m_ExternalRefCount > 0);
int retVal = FastInterlockIncrement((LONG*)&m_ExternalRefCount);
// If we have an exposed object and the refcount is greater than one
// we must make sure to keep a strong handle to the exposed object
// so that we keep it alive even if nobody has a reference to it.
if (pCurThread && ((*((void**)m_ExposedObject)) != NULL))
{
// The exposed object exists and needs a strong handle so check
// to see if it has one.
// Only a managed thread can setup StrongHnd.
if ((*((void**)m_StrongHndToExposedObject)) == NULL)
{
GCX_COOP();
// Store the object in the strong handle.
StoreObjectInHandle(m_StrongHndToExposedObject, ObjectFromHandle(m_ExposedObject));
}
}
return retVal;
}
int Thread::DecExternalCount(BOOL holdingLock)
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
// Note that it's possible to get here with a NULL current thread (during
// shutdown of the thread manager).
Thread *pCurThread = GetThread();
_ASSERTE (pCurThread == NULL || IsAtProcessExit()
|| (!holdingLock && !ThreadStore::HoldingThreadStore(pCurThread))
|| (holdingLock && ThreadStore::HoldingThreadStore(pCurThread)));
BOOL ToggleGC = FALSE;
BOOL SelfDelete = FALSE;
int retVal;
// Must synchronize count and exposed object handle manipulation. We use the
// thread lock for this, which implies that we must be in pre-emptive mode
// to begin with and avoid any activity that would invoke a GC (this
// acquires the thread store lock).
if (pCurThread)
{
// TODO: we would prefer to use a GC Holder here, however it is hard
// to get the case where we're deleting this thread correct given
// the current macros. We want to suppress the release of the holder
// here which puts us in Preemptive mode, and also the switch to
// Cooperative mode below, but since both holders will be named
// the same thing (due to the generic nature of the macro) we can
// not use GCX_*_SUPRESS_RELEASE() for 2 holders in the same scope
// b/c they will both apply simply to the most narrowly scoped
// holder.
ToggleGC = pCurThread->PreemptiveGCDisabled();
if (ToggleGC)
{
pCurThread->EnablePreemptiveGC();
}
}
GCX_ASSERT_PREEMP();
ThreadStoreLockHolder tsLock(!holdingLock);
_ASSERTE(m_ExternalRefCount >= 1);
_ASSERTE(!holdingLock ||
ThreadStore::s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
IsAtProcessExit());
retVal = FastInterlockDecrement((LONG*)&m_ExternalRefCount);
if (retVal == 0)
{
HANDLE h = GetThreadHandle();
if (h == INVALID_HANDLE_VALUE)
{
h = m_ThreadHandleForClose;
m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
}
// Can not assert like this. We have already removed the Unstarted bit.
//_ASSERTE (IsUnstarted() || h != INVALID_HANDLE_VALUE);
if (h != INVALID_HANDLE_VALUE && m_WeOwnThreadHandle)
{
::CloseHandle(h);
SetThreadHandle(INVALID_HANDLE_VALUE);
}
// Switch back to cooperative mode to manipulate the thread.
if (pCurThread)
{
// TODO: we would prefer to use GCX_COOP here, see comment above.
pCurThread->DisablePreemptiveGC();
}
GCX_ASSERT_COOP();
// during process detach the thread might still be in the thread list
// if it hasn't seen its DLL_THREAD_DETACH yet. Use the following
// tweak to decide if the thread has terminated yet.
if (!HasValidThreadHandle())
{
SelfDelete = this == pCurThread;
m_ExceptionState.FreeAllStackTraces();
if (SelfDelete) {
SetThread(NULL);
#ifdef _DEBUG
AddFiberInfo(ThreadTrackInfo_Lifetime);
#endif
}
delete this;
}
tsLock.Release();
// It only makes sense to restore the GC mode if we didn't just destroy
// our own thread object.
if (pCurThread && !SelfDelete && !ToggleGC)
{
pCurThread->EnablePreemptiveGC();
}
// Cannot use this here b/c it creates a holder named the same as GCX_ASSERT_COOP
// in the same scope above...
//
// GCX_ASSERT_PREEMP()
return retVal;
}
else if (pCurThread == NULL)
{
// We're in shutdown, too late to be worrying about having a strong
// handle to the exposed thread object, we've already performed our
// final GC.
tsLock.Release();
return retVal;
}
else
{
// Check to see if the external ref count reaches exactly one. If this
// is the case and we have an exposed object then it is that exposed object
// that is holding a reference to us. To make sure that we are not the
// ones keeping the exposed object alive we need to remove the strong
// reference we have to it.
if ((retVal == 1) && ((*((void**)m_StrongHndToExposedObject)) != NULL))
{
// Switch back to cooperative mode to manipulate the object.
// Don't want to switch back to COOP until we let go of the lock
// however we are allowed to call StoreObjectInHandle here in preemptive
// mode because we are setting the value to NULL.
CONTRACT_VIOLATION(ModeViolation);
// Clear the handle and leave the lock.
// We do not have to DisablePreemptiveGC here, because
// we just want to put NULL into a handle.
StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
tsLock.Release();
// Switch back to the initial GC mode.
if (ToggleGC)
{
pCurThread->DisablePreemptiveGC();
}
GCX_ASSERT_COOP();
return retVal;
}
}
tsLock.Release();
// Switch back to the initial GC mode.
if (ToggleGC)
{
pCurThread->DisablePreemptiveGC();
}
return retVal;
}
//--------------------------------------------------------------------
// Destruction. This occurs after the associated native thread
// has died.
//--------------------------------------------------------------------
Thread::~Thread()
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
// TODO: enable this
//_ASSERTE(GetThread() != this);
_ASSERTE(m_ThrewControlForThread == 0);
// AbortRequest is coupled with TrapReturningThread.
// We should have unmarked the thread for abort.
// !!! Can not assert here. If a thread has no managed code on stack
// !!! we leave the g_TrapReturningThread set so that the thread will be
// !!! aborted if it enters managed code.
//_ASSERTE(!IsAbortRequested());
// We should not have the Thread marked for abort. But if we have
// we need to unmark it so that g_TrapReturningThreads is decremented.
if (IsAbortRequested())
{
UnmarkThreadForAbort(TAR_ALL);
}
#if defined(_DEBUG) && defined(TRACK_SYNC)
_ASSERTE(IsAtProcessExit() || ((Dbg_TrackSyncStack *) m_pTrackSync)->m_StackPointer == 0);
delete m_pTrackSync;
#endif // TRACK_SYNC
_ASSERTE(IsDead() || IsUnstarted() || IsAtProcessExit());
if (m_WaitEventLink.m_Next != NULL && !IsAtProcessExit())
{
WaitEventLink *walk = &m_WaitEventLink;
while (walk->m_Next) {
ThreadQueue::RemoveThread(this, (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1));
StoreEventToEventStore (walk->m_Next->m_EventWait);
}
m_WaitEventLink.m_Next = NULL;
}
if (m_StateNC & TSNC_ExistInThreadStore) {
BOOL ret;
ret = ThreadStore::RemoveThread(this);
_ASSERTE(ret);
}
#ifdef _DEBUG
m_pFrame = (Frame *)POISONC;
#endif
// Update Perfmon counters.
COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical--);
// Currently, recognized threads are non-runtime threads that are alive and have run under the
// runtime. Check whether this Thread was one of them.
if ((m_State & TS_WeOwn) == 0)
{
COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads--);
}
else
{
COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical--);
}
// Normally we shouldn't get here with a valid thread handle; however if SetupThread
// failed (due to an OOM for example) then we need to CloseHandle the thread
// handle if we own it.
if (m_WeOwnThreadHandle && (GetThreadHandle() != INVALID_HANDLE_VALUE))
{
CloseHandle(GetThreadHandle());
}
if (m_DebugSuspendEvent.IsValid())
{
m_DebugSuspendEvent.CloseEvent();
}
if (m_EventWait.IsValid())
{
m_EventWait.CloseEvent();
}
FreeIOCompletionContext();
if (m_OSContext)
delete m_OSContext;
if (GetSavedRedirectContext())
{
delete GetSavedRedirectContext();
SetSavedRedirectContext(NULL);
}
#ifdef FEATURE_COMINTEROP
if (m_pRCWStack)
delete m_pRCWStack;
#endif
if (m_pExceptionDuringStartup)
{
Exception::Delete (m_pExceptionDuringStartup);
}
ClearContext();
if (!IsAtProcessExit())
{
// Destroy any handles that we're using to hold onto exception objects
SafeSetThrowables(NULL);
DestroyShortWeakHandle(m_ExposedObject);
DestroyStrongHandle(m_StrongHndToExposedObject);
}
g_pThinLockThreadIdDispenser->DisposeId(GetThreadId());
// Ensure DeleteThreadStaticData was executed
_ASSERTE(m_pThreadLocalBlock == NULL);
_ASSERTE(m_pTLBTable == NULL);
_ASSERTE(m_TLBTableSize == 0);
#ifdef FEATURE_PREJIT
if (m_pIBCInfo) {
delete m_pIBCInfo;
}
#endif
#ifdef _DEBUG
if (m_pFiberInfo != NULL) {
delete [] (DWORD_PTR*)m_pFiberInfo[0];
}
#endif
#ifdef FEATURE_EVENT_TRACE
// Destruct the thread local type cache for allocation sampling
if(m_pAllLoggedTypes) {
ETW::TypeSystemLog::DeleteTypeHashNoLock(&m_pAllLoggedTypes);
}
#endif // FEATURE_EVENT_TRACE
// Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
CrstHolder lock(&g_DeadlockAwareCrst);
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
void Thread::BaseCoUninitialize()
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_SO_INTOLERANT;
STATIC_CONTRACT_MODE_PREEMPTIVE;
_ASSERTE(GetThread() == this);
BEGIN_SO_TOLERANT_CODE(this);
// BEGIN_SO_TOLERANT_CODE wraps a __try/__except around this call, so if the OS were to allow
// an exception to leak through to us, we'll catch it.
::CoUninitialize();
END_SO_TOLERANT_CODE;
}// BaseCoUninitialize
#ifdef FEATURE_COMINTEROP
void Thread::BaseWinRTUninitialize()
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_SO_INTOLERANT;
STATIC_CONTRACT_MODE_PREEMPTIVE;
_ASSERTE(WinRTSupported());
_ASSERTE(GetThread() == this);
_ASSERTE(IsWinRTInitialized());
BEGIN_SO_TOLERANT_CODE(this);
RoUninitialize();
END_SO_TOLERANT_CODE;
}
#endif // FEATURE_COMINTEROP
void Thread::CoUninitialize()
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
// Running threads might have performed a CoInitialize which must
// now be balanced.
BOOL needsUninitialize = IsCoInitialized()
#ifdef FEATURE_COMINTEROP
|| IsWinRTInitialized()
#endif // FEATURE_COMINTEROP
;
if (!IsAtProcessExit() && needsUninitialize)
{
GCX_PREEMP();
CONTRACT_VIOLATION(ThrowsViolation);
if (IsCoInitialized())
{
BaseCoUninitialize();
FastInterlockAnd((ULONG *)&m_State, ~TS_CoInitialized);
}
#ifdef FEATURE_COMINTEROP
if (IsWinRTInitialized())
{
_ASSERTE(WinRTSupported());
BaseWinRTUninitialize();
ResetWinRTInitialized();
}
#endif // FEATURE_COMINTEROP
}
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
void Thread::CleanupDetachedThreads()
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
_ASSERTE(!ThreadStore::HoldingThreadStore());
ThreadStoreLockHolder threadStoreLockHolder;
Thread *thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
STRESS_LOG0(LF_SYNC, LL_INFO1000, "T::CDT called\n");
while (thread != NULL)
{
Thread *next = ThreadStore::GetAllThreadList(thread, 0, 0);
if (thread->IsDetached() && thread->m_UnmanagedRefCount == 0)
{
STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - detaching thread 0x%p\n", thread);
// Unmark that the thread is detached while we have the
// thread store lock. This will ensure that no other
// thread will race in here and try to delete it, too.
FastInterlockAnd((ULONG*)&(thread->m_State), ~TS_Detached);
FastInterlockDecrement(&m_DetachCount);
if (!thread->IsBackground())
FastInterlockDecrement(&m_ActiveDetachCount);
// If the debugger is attached, then we need to unlock the
// thread store before calling OnThreadTerminate. That
// way, we won't be holding the thread store lock if we
// need to block sending a detach thread event.
BOOL debuggerAttached =
#ifdef DEBUGGING_SUPPORTED
CORDebuggerAttached();
#else // !DEBUGGING_SUPPORTED
FALSE;
#endif // !DEBUGGING_SUPPORTED
if (debuggerAttached)
ThreadStore::UnlockThreadStore();
thread->OnThreadTerminate(debuggerAttached ? FALSE : TRUE);
#ifdef DEBUGGING_SUPPORTED
if (debuggerAttached)
{
ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
// We remember the next Thread in the thread store
// list before deleting the current one. But we can't
// use that Thread pointer now that we release the
// thread store lock in the middle of the loop. We
// have to start from the beginning of the list every
// time. If two threads T1 and T2 race into
// CleanupDetachedThreads, then T1 will grab the first
// Thread on the list marked for deletion and release
// the lock. T2 will grab the second one on the
// list. T2 may complete destruction of its Thread,
// then T1 might re-acquire the thread store lock and
// try to use the next Thread in the thread store. But
// T2 just deleted that next Thread.
thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
}
else
#endif // DEBUGGING_SUPPORTED
{
thread = next;
}
}
else if (thread->HasThreadState(TS_Finalized))
{
STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - finalized thread 0x%p\n", thread);
thread->ResetThreadState(TS_Finalized);
// We have finalized the managed Thread object. Now it is time to clean up the unmanaged part
thread->DecExternalCount(TRUE);
thread = next;
}
else
{
thread = next;
}
}
s_fCleanFinalizedThread = FALSE;
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
void Thread::CleanupCOMState()
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
#ifdef FEATURE_COMINTEROP
if (GetFinalApartment() == Thread::AS_InSTA)
ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
#endif // FEATURE_COMINTEROP
// Running threads might have performed a CoInitialize which must
// now be balanced. However, only the thread that called CoInitialize can
// call CoUninitialize.
BOOL needsUninitialize = IsCoInitialized()
#ifdef FEATURE_COMINTEROP
|| IsWinRTInitialized()
#endif // FEATURE_COMINTEROP
;
if (needsUninitialize)
{
GCX_PREEMP();
CONTRACT_VIOLATION(ThrowsViolation);
if (IsCoInitialized())
{
BaseCoUninitialize();
ResetCoInitialized();
}
#ifdef FEATURE_COMINTEROP
if (IsWinRTInitialized())
{
_ASSERTE(WinRTSupported());
BaseWinRTUninitialize();
ResetWinRTInitialized();
}
#endif // FEATURE_COMINTEROP
}
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
// See general comments on thread destruction (code:#threadDestruction) above.
void Thread::OnThreadTerminate(BOOL holdingLock)
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
// #ReportDeadOnThreadTerminate
// Caller should have put the TS_ReportDead bit on by now.
// We don't want any windows after the exit event but before the thread is marked dead.
// If a debugger attached during such a window (or even took a dump at the exit event),
// then it may not realize the thread is dead.
// So ensure we mark the thread as dead before we send the tool notifications.
// The TS_ReportDead bit will cause the debugger to view this as TS_Dead.
_ASSERTE(HasThreadState(TS_ReportDead));
// Should not use OSThreadId:
// OSThreadId may change for the current thread if the thread is blocked and rescheduled
// by a host.
Thread *pCurrentThread = GetThread();
DWORD CurrentThreadID = pCurrentThread?pCurrentThread->GetThreadId():0;
DWORD ThisThreadID = GetThreadId();
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// If the currently running thread is the thread that died and it is an STA thread, then we
// need to release all the RCW's in the current context. However, we cannot do this if we
// are in the middle of process detach.
if (!IsAtProcessExit() && this == GetThread())
{
CleanupCOMState();
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
if (g_fEEShutDown != 0)
{
// We have started shutdown. Not safe to touch CLR state.
return;
}
// We took a count during construction, and we rely on the count being
// non-zero as we terminate the thread here.
_ASSERTE(m_ExternalRefCount > 0);
// The thread is no longer running. It's important that we zero any general OBJECTHANDLE's
// on this Thread object. That's because we need the managed Thread object to be subject to
// GC and yet any HANDLE is opaque to the GC when it comes to collecting cycles. If e.g. the
// Thread's AbortReason (which is an arbitrary object) contains transitively a reference back
// to the Thread, then we have an uncollectible cycle. When the thread is executing, nothing
// can be collected anyway. But now that we stop running the cycle concerns us.
//
// It's important that we only use OBJECTHANDLE's that are retrievable while the thread is
// still running. That's what allows us to zero them here with impunity:
{
// No handles to clean up in the m_ExceptionState
_ASSERTE(!m_ExceptionState.IsExceptionInProgress());
GCX_COOP();
// Destroy the LastThrown handle (and anything that violates the above assert).
SafeSetThrowables(NULL);
// Cleaning up the AbortReason is tricky, since the handle is only valid if the ADID is valid
// ...and we can only perform this operation if other threads aren't racing to update these
// values on our thread asynchronously.
ClearAbortReason();
// Free all structures related to thread statics for this thread
DeleteThreadStaticData();
}
if (GCHeapUtilities::IsGCHeapInitialized())
{
// Guaranteed to NOT be a shutdown case, because we tear down the heap before
// we tear down any threads during shutdown.
if (ThisThreadID == CurrentThreadID)
{
GCX_COOP();
GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL);
m_alloc_context.init();
}
}
// We switch a thread to dead when it has finished doing useful work. But it
// remains in the thread store so long as someone keeps it alive. An exposed
// object will do this (it releases the refcount in its finalizer). If the
// thread is never released, we have another look during product shutdown and
// account for the unreleased refcount of the uncollected exposed object:
if (IsDead())
{
GCX_COOP();
_ASSERTE(IsAtProcessExit());
ClearContext();
if (m_ExposedObject != NULL)
DecExternalCount(holdingLock); // may destruct now
}
else
{
#ifdef DEBUGGING_SUPPORTED
//
// If we're debugging, let the debugger know that this thread is
// gone.
//
// There is a race here where the debugger could have attached after
// we checked (and thus didn't release the lock). In this case,
// we can't call out to the debugger or we risk a deadlock.
//
if (!holdingLock && CORDebuggerAttached())
{
g_pDebugInterface->DetachThread(this);
}
#endif // DEBUGGING_SUPPORTED
#ifdef PROFILING_SUPPORTED
// If a profiler is present, then notify the profiler of thread destroy
{
BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
GCX_PREEMP();
g_profControlBlock.pProfInterface->ThreadDestroyed((ThreadID) this);
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
if (!holdingLock)
{
LOG((LF_SYNC, INFO3, "OnThreadTerminate obtain lock\n"));
ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
}
if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
{
// We must be holding the ThreadStore lock in order to clean up alloc context.
// We should never call FixAllocContext during GC.
GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL);
m_alloc_context.init();
}
FastInterlockOr((ULONG *) &m_State, TS_Dead);
ThreadStore::s_pThreadStore->m_DeadThreadCount++;
ThreadStore::s_pThreadStore->IncrementDeadThreadCountForGCTrigger();
if (IsUnstarted())
ThreadStore::s_pThreadStore->m_UnstartedThreadCount--;
else
{
if (IsBackground())
ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
}
FastInterlockAnd((ULONG *) &m_State, ~(TS_Unstarted | TS_Background));
//
// If this thread was told to trip for debugging between the
// sending of the detach event above and the locking of the
// thread store lock, then remove the flag and decrement the
// global trap returning threads count.
//
if (!IsAtProcessExit())
{
// A thread can't die during a GCPending, because the thread store's
// lock is held by the GC thread.
if (m_State & TS_DebugSuspendPending)
UnmarkForSuspension(~TS_DebugSuspendPending);
// CoreCLR does not support user-requested thread suspension
_ASSERTE(!(m_State & TS_UserSuspendPending));
if (CurrentThreadID == ThisThreadID && IsAbortRequested())
{
UnmarkThreadForAbort(Thread::TAR_ALL);
}
}
if (GetThreadHandle() != INVALID_HANDLE_VALUE)
{
if (m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
{
m_ThreadHandleForClose = GetThreadHandle();
}
SetThreadHandle (INVALID_HANDLE_VALUE);
}
m_OSThreadId = 0;
// If nobody else is holding onto the thread, we may destruct it here:
ULONG oldCount = DecExternalCount(TRUE);
// If we are shutting down the process, we only have one thread active in the
// system. So we can disregard all the reasons that hold this thread alive --
// TLS is about to be reclaimed anyway.
if (IsAtProcessExit())
while (oldCount > 0)
{
oldCount = DecExternalCount(TRUE);
}
// ASSUME THAT THE THREAD IS DELETED, FROM HERE ON
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= 0);
_ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
ThreadStore::s_pThreadStore->m_BackgroundThreadCount);
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
ThreadStore::s_pThreadStore->m_UnstartedThreadCount);
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
ThreadStore::s_pThreadStore->m_DeadThreadCount);
// One of the components of OtherThreadsComplete() has changed, so check whether
// we should now exit the EE.
ThreadStore::CheckForEEShutdown();
if (ThisThreadID == CurrentThreadID)
{
// NULL out the thread block in the tls. We can't do this if we aren't on the
// right thread. But this will only happen during a shutdown. And we've made
// a "best effort" to reduce to a single thread before we begin the shutdown.
SetThread(NULL);
SetAppDomain(NULL);
}
if (!holdingLock)
{
LOG((LF_SYNC, INFO3, "OnThreadTerminate releasing lock\n"));
ThreadSuspend::UnlockThreadStore(ThisThreadID == CurrentThreadID);
}
}
}
// Helper functions to check for duplicate handles. We only do this check if
// a wait-for-multiple fails.
int __cdecl compareHandles( const void *arg1, const void *arg2 )
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
HANDLE h1 = *(HANDLE*)arg1;
HANDLE h2 = *(HANDLE*)arg2;
return (h1 == h2) ? 0 : ((h1 < h2) ? -1 : 1);
}
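// Sorts the caller's handle array in place and then scans adjacent entries; after the
// sort, any duplicate handles are neighbors.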
BOOL CheckForDuplicateHandles(int countHandles, HANDLE *handles)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
qsort(handles,countHandles,sizeof(HANDLE),compareHandles);
for (int i=1; i < countHandles; i++)
{
if (handles[i-1] == handles[i])
return TRUE;
}
return FALSE;
}
//--------------------------------------------------------------------
// Based on whether this thread has a message pump, do the appropriate
// style of Wait.
//--------------------------------------------------------------------
DWORD Thread::DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll,
DWORD millis, WaitMode mode, PendingSync *syncState)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
_ASSERTE(alertable || syncState == 0);
struct Param
{
Thread *pThis;
int countHandles;
HANDLE *handles;
BOOL waitAll;
DWORD millis;
WaitMode mode;
DWORD dwRet;
} param;
param.pThis = this;
param.countHandles = countHandles;
param.handles = handles;
param.waitAll = waitAll;
param.millis = millis;
param.mode = mode;
param.dwRet = (DWORD) -1;
EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->countHandles, pParam->handles, pParam->waitAll, pParam->millis, pParam->mode);
}
EE_FINALLY {
if (syncState) {
if (!GOT_EXCEPTION() &&
param.dwRet >= WAIT_OBJECT_0 && param.dwRet < (DWORD)(WAIT_OBJECT_0 + countHandles)) {
// This thread has been removed from syncblk waiting list by the signalling thread
syncState->Restore(FALSE);
}
else
syncState->Restore(TRUE);
}
_ASSERTE (param.dwRet != WAIT_IO_COMPLETION);
}
EE_END_FINALLY;
return(param.dwRet);
}
DWORD Thread::DoAppropriateWait(AppropriateWaitFunc func, void *args,
DWORD millis, WaitMode mode,
PendingSync *syncState)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
_ASSERTE(alertable || syncState == 0);
struct Param
{
Thread *pThis;
AppropriateWaitFunc func;
void *args;
DWORD millis;
WaitMode mode;
DWORD dwRet;
} param;
param.pThis = this;
param.func = func;
param.args = args;
param.millis = millis;
param.mode = mode;
param.dwRet = (DWORD) -1;
EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->func, pParam->args, pParam->millis, pParam->mode);
}
EE_FINALLY {
if (syncState) {
if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
// This thread has been removed from syncblk waiting list by the signalling thread
syncState->Restore(FALSE);
}
else
syncState->Restore(TRUE);
}
_ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
}
EE_END_FINALLY;
return(param.dwRet);
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
//--------------------------------------------------------------------
// helper to do message wait
//--------------------------------------------------------------------
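// An STA thread must keep pumping COM/window messages while it blocks, so instead of
// WaitForMultipleObjectsEx we use CoWaitForMultipleHandles, which pumps as appropriate
// for the current apartment and whose HRESULT is mapped back onto WAIT_*-style return
// codes below.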
DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL bAlertable)
{
STATIC_CONTRACT_THROWS;
// The true contract for GC trigger should be the following, but this puts a very strong
// restriction on the contract of any function that calls EnablePreemptiveGC.
//if (GetThread() && !ThreadStore::HoldingThreadStore(GetThread())) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
STATIC_CONTRACT_SO_INTOLERANT;
STATIC_CONTRACT_GC_TRIGGERS;
DWORD flags = 0;
DWORD dwReturn=WAIT_ABANDONED;
Thread* pThread = GetThread();
// If pThread is NULL, we'd better shut down.
if (pThread == NULL)
_ASSERTE (g_fEEShutDown);
DWORD lastError = 0;
BEGIN_SO_TOLERANT_CODE(pThread);
// If we're going to pump, we cannot use WAIT_ALL. That's because the wait would
// only be satisfied if a message arrives while the handles are signalled. If we
// want true WAIT_ALL, we need to fire up a different thread in the MTA and wait
// on its result. This isn't implemented yet.
//
// A change was added to WaitHandleNative::CorWaitMultipleNative to disable WaitAll
// in an STA with more than one handle.
if (bWaitAll)
{
if (numWaiters == 1)
bWaitAll = FALSE;
// The check that's supposed to prevent this condition from occurring, in WaitHandleNative::CorWaitMultipleNative,
// is unfortunately behind FEATURE_COMINTEROP instead of FEATURE_COMINTEROP_APARTMENT_SUPPORT.
// So on CoreCLR (where FEATURE_COMINTEROP is not currently defined) we can actually reach this point.
// We can't fix this, because it's a breaking change, so we just won't assert here.
// The result is that WaitAll on an STA thread in CoreCLR will behave strangely, as described above.
}
if (bWaitAll)
flags |= COWAIT_WAITALL;
if (bAlertable)
flags |= COWAIT_ALERTABLE;
HRESULT hr = S_OK;
hr = CoWaitForMultipleHandles(flags, millis, numWaiters, phEvent, &dwReturn);
if (hr == RPC_S_CALLPENDING)
{
dwReturn = WAIT_TIMEOUT;
}
else if (FAILED(hr))
{
// The service behaves differently on an STA vs. MTA in how much
// error information it propagates back, and in which form. We currently
// only get here in the STA case, so bias this logic that way.
dwReturn = WAIT_FAILED;
}
else
{
dwReturn += WAIT_OBJECT_0; // success -- bias back
}
lastError = ::GetLastError();
END_SO_TOLERANT_CODE;
// END_SO_TOLERANT_CODE overwrites the last error. Restore it here.
::SetLastError(lastError);
return dwReturn;
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
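// Thin wrapper that performs the OS wait inside an SO-tolerant region and preserves the
// Win32 last-error value across END_SO_TOLERANT_CODE.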
DWORD WaitForMultipleObjectsEx_SO_TOLERANT (DWORD nCount, HANDLE *lpHandles, BOOL bWaitAll,DWORD dwMilliseconds, BOOL bAlertable)
{
STATIC_CONTRACT_SO_INTOLERANT;
DWORD dwRet = WAIT_FAILED;
DWORD lastError = 0;
BEGIN_SO_TOLERANT_CODE (GetThread ());
dwRet = ::WaitForMultipleObjectsEx (nCount, lpHandles, bWaitAll, dwMilliseconds, bAlertable);
lastError = ::GetLastError();
END_SO_TOLERANT_CODE;
// END_SO_TOLERANT_CODE overwrites the last error. Restore it here.
::SetLastError(lastError);
return dwRet;
}
//--------------------------------------------------------------------
// Do appropriate wait based on apartment state (STA or MTA)
DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll,
DWORD timeout, WaitMode mode)
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
SO_INTOLERANT;
}
CONTRACTL_END;
BOOL alertable = (mode & WaitMode_Alertable) != 0;
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
if (alertable && !GetDomain()->MustForceTrivialWaitOperations())
{
ApartmentState as = GetFinalApartment();
if (AS_InMTA != as)
{
return MsgWaitHelper(numWaiters, pHandles, bWaitAll, timeout, alertable);
}
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
return WaitForMultipleObjectsEx_SO_TOLERANT(numWaiters, pHandles, bWaitAll, timeout, alertable);
}
// A helper called by our two flavors of DoAppropriateWaitWorker
void Thread::DoAppropriateWaitWorkerAlertableHelper(WaitMode mode)
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END;
// If thread abort is prevented, we do not want this thread to see thread abort and thread interrupt exception.
if (IsAbortPrevented())
{
return;
}
// A word about ordering for Interrupt. If someone tries to interrupt a thread
// that's in the interruptible state, we queue an APC. But if they try to interrupt
// a thread that's not in the interruptible state, we just record that fact. So
// we have to set TS_Interruptible before we test to see whether someone wants to
// interrupt us or else we have a race condition that causes us to skip the APC.
FastInterlockOr((ULONG *) &m_State, TS_Interruptible);
if (HasThreadStateNC(TSNC_InRestoringSyncBlock))
{
// The thread is restoring SyncBlock for Object.Wait.
ResetThreadStateNC(TSNC_InRestoringSyncBlock);
}
else
{
HandleThreadInterrupt((mode & WaitMode_ADUnload) != 0);
// Safe to clear the interrupted state, no APC could have fired since we
// reset m_UserInterrupt (which inhibits our APC callback from doing
// anything).
FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
}
}
void MarkOSAlertableWait()
{
LIMITED_METHOD_CONTRACT;
GetThread()->SetThreadStateNC (Thread::TSNC_OSAlertableWait);
}
void UnMarkOSAlertableWait()
{
LIMITED_METHOD_CONTRACT;
GetThread()->ResetThreadStateNC (Thread::TSNC_OSAlertableWait);
}
//--------------------------------------------------------------------
// Based on whether this thread has a message pump, do the appropriate
// style of Wait.
//--------------------------------------------------------------------
DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll,
DWORD millis, WaitMode mode)
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END;
DWORD ret = 0;
BOOL alertable = (mode & WaitMode_Alertable) != 0;
// Waits from SynchronizationContext.WaitHelper are always just WaitMode_IgnoreSyncCtx.
// So if we defer to a sync ctx, we will lose any extra bits. We must therefore not
// defer to a sync ctx if doing any non-default wait.
// If you're doing a default wait, but want to ignore sync ctx, specify WaitMode_IgnoreSyncCtx
// which will make mode != WaitMode_Alertable.
BOOL ignoreSyncCtx = (mode != WaitMode_Alertable);
if (GetDomain()->MustForceTrivialWaitOperations())
ignoreSyncCtx = TRUE;
// Unless the ignoreSyncCtx flag is set, first check to see if there is a synchronization
// context on the current thread and if there is, dispatch to it to do the wait.
// If the wait is non alertable we cannot forward the call to the sync context
// since fundamental parts of the system (such as the GC) rely on non alertable
// waits not running any managed code. Also, if we are past the point in shutdown where we
// are allowed to run managed code then we can't forward the call to the sync context.
if (!ignoreSyncCtx && alertable && CanRunManagedCode(LoaderLockCheck::None)
&& !HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
{
GCX_COOP();
BOOL fSyncCtxPresent = FALSE;
OBJECTREF SyncCtxObj = NULL;
GCPROTECT_BEGIN(SyncCtxObj)
{
GetSynchronizationContext(&SyncCtxObj);
if (SyncCtxObj != NULL)
{
SYNCHRONIZATIONCONTEXTREF syncRef = (SYNCHRONIZATIONCONTEXTREF)SyncCtxObj;
if (syncRef->IsWaitNotificationRequired())
{
fSyncCtxPresent = TRUE;
ret = DoSyncContextWait(&SyncCtxObj, countHandles, handles, waitAll, millis);
}
}
}
GCPROTECT_END();
if (fSyncCtxPresent)
return ret;
}
// Before going to pre-emptive mode the thread needs to be flagged as waiting for
// the debugger. This used to be accomplished by the TS_Interruptible flag but that
// doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
// COOP mode so we set the bit before the transition. For the calls that are already
// in pre-emptive mode those are still buggy. This is only a partial fix.
BOOL isCoop = PreemptiveGCDisabled();
ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
GCX_PREEMP();
if (alertable)
{
DoAppropriateWaitWorkerAlertableHelper(mode);
}
StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
ULONGLONG dwStart = 0, dwEnd;
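// Timeout bookkeeping for the retry loop below: capture the tick count before each
// wait, and if we have to re-wait (APC wakeup or an invalid handle being removed),
// subtract the elapsed time from the remaining timeout first.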
retry:
if (millis != INFINITE)
{
dwStart = CLRGetTickCount64();
}
ret = DoAppropriateAptStateWait(countHandles, handles, waitAll, millis, mode);
if (ret == WAIT_IO_COMPLETION)
{
_ASSERTE (alertable);
if (m_State & TS_Interrupted)
{
HandleThreadInterrupt(mode & WaitMode_ADUnload);
}
// We could be woken by some spurious APC or an EE APC queued to
// interrupt us. In the latter case the TS_Interrupted bit will be set
// in the thread state bits. Otherwise we just go back to sleep again.
if (millis != INFINITE)
{
dwEnd = CLRGetTickCount64();
if (dwEnd >= dwStart + millis)
{
ret = WAIT_TIMEOUT;
goto WaitCompleted;
}
else
{
millis -= (DWORD)(dwEnd - dwStart);
}
}
goto retry;
}
_ASSERTE((ret >= WAIT_OBJECT_0 && ret < (WAIT_OBJECT_0 + (DWORD)countHandles)) ||
(ret >= WAIT_ABANDONED && ret < (WAIT_ABANDONED + (DWORD)countHandles)) ||
(ret == WAIT_TIMEOUT) || (ret == WAIT_FAILED));
// countHandles is used as an unsigned -- it should never be negative.
_ASSERTE(countHandles >= 0);
// We support precisely one WAIT_FAILED case: when we attempt to wait on a
// thread handle and the thread is in the process of dying, we might get an
// invalid-handle substatus. Turn this into a successful wait.
// There are three cases to consider:
// 1) Only waiting on one handle: return success right away.
// 2) Waiting for all handles to be signalled: retry the wait without the
// affected handle.
// 3) Waiting for one of multiple handles to be signalled: return with the
// first handle that is either signalled or has become invalid.
if (ret == WAIT_FAILED)
{
DWORD errorCode = ::GetLastError();
if (errorCode == ERROR_INVALID_PARAMETER)
{
if (CheckForDuplicateHandles(countHandles, handles))
COMPlusThrow(kDuplicateWaitObjectException);
else
COMPlusThrowHR(HRESULT_FROM_WIN32(errorCode));
}
else if (errorCode == ERROR_ACCESS_DENIED)
{
// A Win32 ACL could prevent us from waiting on the handle.
COMPlusThrow(kUnauthorizedAccessException);
}
else if (errorCode == ERROR_NOT_ENOUGH_MEMORY)
{
ThrowOutOfMemory();
}
#ifdef FEATURE_PAL
else if (errorCode == ERROR_NOT_SUPPORTED)
{
// "Wait for any" and "wait for all" operations on multiple wait handles are not supported when a cross-process sync
// object is included in the array
COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_NamedSyncObjectWaitAnyWaitAll"));
}
#endif
else if (errorCode != ERROR_INVALID_HANDLE)
{
ThrowWin32(errorCode);
}
if (countHandles == 1)
ret = WAIT_OBJECT_0;
else if (waitAll)
{
// Probe all handles with a timeout of zero. When we find one that's
// invalid, move it out of the list and retry the wait.
for (int i = 0; i < countHandles; i++)
{
// WaitForSingleObject won't pump messages; we already probe enough space
// before calling this function and we don't want to fail here, so we don't
// do a transition to tolerant code here.
DWORD subRet = WaitForSingleObject (handles[i], 0);
if (subRet != WAIT_FAILED)
continue;
_ASSERTE(::GetLastError() == ERROR_INVALID_HANDLE);
if ((countHandles - i - 1) > 0)
memmove(&handles[i], &handles[i+1], (countHandles - i - 1) * sizeof(HANDLE));
countHandles--;
break;
}
// Compute the new timeout value by assuming that the timeout
// is not large enough for more than one wrap.
dwEnd = CLRGetTickCount64();
if (millis != INFINITE)
{
if (dwEnd >= dwStart + millis)
{
ret = WAIT_TIMEOUT;
goto WaitCompleted;
}
else
{
millis -= (DWORD)(dwEnd - dwStart);
}
}
goto retry;
}
else
{
// Probe all handles with a timeout of zero, and succeed with the first
// handle that doesn't time out.
ret = WAIT_OBJECT_0;
int i;
for (i = 0; i < countHandles; i++)
{
TryAgain:
// WaitForSingleObject won't pump messages; we already probe enough space
// before calling this function and we don't want to fail here, so we don't
// do a transition to tolerant code here.
DWORD subRet = WaitForSingleObject (handles[i], 0);
if ((subRet == WAIT_OBJECT_0) || (subRet == WAIT_FAILED))
break;
if (subRet == WAIT_ABANDONED)
{
ret = (ret - WAIT_OBJECT_0) + WAIT_ABANDONED;
break;
}
// If we get alerted it just masks the real state of the current
// handle, so retry the wait.
if (subRet == WAIT_IO_COMPLETION)
goto TryAgain;
_ASSERTE(subRet == WAIT_TIMEOUT);
ret++;
}
}
}
WaitCompleted:
_ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
return ret;
}
DWORD Thread::DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args,
DWORD millis, WaitMode mode)
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END;
BOOL alertable = (mode & WaitMode_Alertable)!=0;
// Before going to pre-emptive mode the thread needs to be flagged as waiting for
// the debugger. This used to be accomplished by the TS_Interruptible flag but that
// doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
// COOP mode so we set the bit before the transition. For the calls that are already
// in pre-emptive mode those are still buggy. This is only a partial fix.
BOOL isCoop = PreemptiveGCDisabled();
ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
GCX_PREEMP();
// <TODO>
// @TODO cwb: we don't know whether a thread has a message pump or
// how to pump its messages, currently.
// @TODO cwb: WinCE isn't going to support Thread.Interrupt() correctly until
// we get alertable waits on that platform.</TODO>
DWORD ret;
if(alertable)
{
DoAppropriateWaitWorkerAlertableHelper(mode);
}
DWORD option;
if (alertable)
{
option = WAIT_ALERTABLE;
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
ApartmentState as = GetFinalApartment();
if ((AS_InMTA != as) && !GetDomain()->MustForceTrivialWaitOperations())
{
option |= WAIT_MSGPUMP;
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
}
else
{
option = 0;
}
ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
ULONGLONG dwStart = 0;
ULONGLONG dwEnd;
retry:
if (millis != INFINITE)
{
dwStart = CLRGetTickCount64();
}
ret = func(args, millis, option);
if (ret == WAIT_IO_COMPLETION)
{
_ASSERTE (alertable);
if ((m_State & TS_Interrupted))
{
HandleThreadInterrupt(mode & WaitMode_ADUnload);
}
if (millis != INFINITE)
{
dwEnd = CLRGetTickCount64();
if (dwEnd >= dwStart + millis)
{
ret = WAIT_TIMEOUT;
goto WaitCompleted;
}
else
{
millis -= (DWORD)(dwEnd - dwStart);
}
}
goto retry;
}
WaitCompleted:
_ASSERTE(ret == WAIT_OBJECT_0 ||
ret == WAIT_ABANDONED ||
ret == WAIT_TIMEOUT ||
ret == WAIT_FAILED);
_ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
return ret;
}
//--------------------------------------------------------------------
// Only one style of wait for DoSignalAndWait since we don't support this on STA Threads
//--------------------------------------------------------------------
DWORD Thread::DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable, PendingSync *syncState)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
_ASSERTE(alertable || syncState == 0);
struct Param
{
Thread *pThis;
HANDLE *handles;
DWORD millis;
BOOL alertable;
DWORD dwRet;
} param;
param.pThis = this;
param.handles = handles;
param.millis = millis;
param.alertable = alertable;
param.dwRet = (DWORD) -1;
EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
pParam->dwRet = pParam->pThis->DoSignalAndWaitWorker(pParam->handles, pParam->millis, pParam->alertable);
}
EE_FINALLY {
if (syncState) {
if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
// This thread has been removed from syncblk waiting list by the signalling thread
syncState->Restore(FALSE);
}
else
syncState->Restore(TRUE);
}
_ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
}
EE_END_FINALLY;
return(param.dwRet);
}
DWORD Thread::DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable)
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END;
DWORD ret = 0;
GCX_PREEMP();
if(alertable)
{
DoAppropriateWaitWorkerAlertableHelper(WaitMode_None);
}
StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
ULONGLONG dwStart = 0, dwEnd;
if (INFINITE != millis)
{
dwStart = CLRGetTickCount64();
}
ret = SignalObjectAndWait(pHandles[0], pHandles[1], millis, alertable);