// [scrape artifact] The following lines were GitHub page chrome, not source code:
//   Permalink / Fetching contributors… / Cannot retrieve contributors at this time
//   7194 lines (6179 sloc) 291 KB
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
//
#include "common.h"
#ifdef WIN64EXCEPTIONS
#include "exceptionhandling.h"
#include "dbginterface.h"
#include "asmconstants.h"
#include "eetoprofinterfacewrapper.inl"
#include "eedbginterfaceimpl.inl"
#include "perfcounters.h"
#include "eventtrace.h"
#include "virtualcallstub.h"
#if defined(_TARGET_X86_)
#define USE_CURRENT_CONTEXT_IN_FILTER
#endif // _TARGET_X86_
#if defined(_TARGET_ARM_) || defined(_TARGET_X86_)
#define VSD_STUB_CAN_THROW_AV
#endif // _TARGET_ARM_ || _TARGET_X86_
#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
// ARM/ARM64 uses Caller-SP to locate PSPSym in the funclet frame.
#define USE_CALLER_SP_IN_FUNCLET
#endif // _TARGET_ARM_ || _TARGET_ARM64_
#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) || defined(_TARGET_X86_)
#define ADJUST_PC_UNWOUND_TO_CALL
#define STACK_RANGE_BOUNDS_ARE_CALLER_SP
#define USE_FUNCLET_CALL_HELPER
// For ARM/ARM64, EstablisherFrame is Caller-SP (SP just before executing call instruction).
// This has been confirmed by AaronGi from the kernel team for Windows.
//
// For x86/Linux, RtlVirtualUnwind sets EstablisherFrame as Caller-SP.
#define ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
#endif // _TARGET_ARM_ || _TARGET_ARM64_ || _TARGET_X86_
#ifndef FEATURE_PAL
void __declspec(noinline)
ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord,
UINT_PTR ReturnValue,
UINT_PTR TargetIP,
UINT_PTR TargetFrameSp);
#endif // !FEATURE_PAL
#ifdef USE_CURRENT_CONTEXT_IN_FILTER
inline void CaptureNonvolatileRegisters(PKNONVOLATILE_CONTEXT pNonvolatileContext, PCONTEXT pContext)
{
#define CALLEE_SAVED_REGISTER(reg) pNonvolatileContext->reg = pContext->reg;
ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
}
inline void RestoreNonvolatileRegisters(PCONTEXT pContext, PKNONVOLATILE_CONTEXT pNonvolatileContext)
{
#define CALLEE_SAVED_REGISTER(reg) pContext->reg = pNonvolatileContext->reg;
ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
}
inline void RestoreNonvolatileRegisterPointers(PT_KNONVOLATILE_CONTEXT_POINTERS pContextPointers, PKNONVOLATILE_CONTEXT pNonvolatileContext)
{
#define CALLEE_SAVED_REGISTER(reg) pContextPointers->reg = &pNonvolatileContext->reg;
ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
}
#endif
#ifndef DACCESS_COMPILE
// o Functions and funclets are tightly associated. In fact, they are laid out in contiguous memory.
// They also present some interesting issues with respect to EH because we will see callstacks with
// both functions and funclets, but need to logically treat them as the original single IL function
// described them.
//
// o All funclets are ripped out of line from the main function. Finally clauses are pulled out of
// line and replaced by calls to the funclets. Catch clauses, however, are simply pulled out of
// line. !!!This causes a loss of nesting information in clause offsets.!!! A canonical example of
// two different functions which look identical due to clause removal is as shown in the code
// snippets below. The reason they look identical in the face of out-of-line funclets is that the
// region bounds for the "try A" region collapse and become identical to the region bounds for
// region "try B". This will look identical to the region information for Bar because Bar must
// have a separate entry for each catch clause, both of which will have the same try-region bounds.
//
// void Foo() void Bar()
// { {
// try A try C
// { {
// try B BAR_BLK_1
// { }
// FOO_BLK_1 catch C
// } {
// catch B BAR_BLK_2
// { }
// FOO_BLK_2 catch D
// } {
// } BAR_BLK_3
// catch A }
// { }
// FOO_BLK_3
// }
// }
//
// o The solution is to duplicate all clauses that logically cover the funclet in its parent
// method, but with the try-region covering the entire out-of-line funclet code range. This will
// differentiate the canonical example above because the CatchB funclet will have a try-clause
// covering it whose associated handler is CatchA. In Bar, there is no such duplication of any clauses.
//
// o The behavior of the personality routine depends upon the JIT to properly order the clauses from
// inside-out. This allows us to properly handle a situation where our control PC is covered by clauses
// that should not be considered because a more nested clause will catch the exception and resume within
// the scope of the outer clauses.
//
// o This sort of clause duplication for funclets should be done for all clause types, not just catches.
// Unfortunately, I cannot articulate why at the moment.
//
#ifdef _DEBUG
void DumpClauses(IJitManager* pJitMan, const METHODTOKEN& MethToken, UINT_PTR uMethodStartPC, UINT_PTR dwControlPc);
static void DoEHLog(DWORD lvl, __in_z const char *fmt, ...);
#define EH_LOG(expr) { DoEHLog expr ; }
#else
#define EH_LOG(expr)
#endif
TrackerAllocator g_theTrackerAllocator;
bool FixNonvolatileRegisters(UINT_PTR uOriginalSP,
Thread* pThread,
CONTEXT* pContextRecord,
bool fAborting
);
void FixContext(PCONTEXT pContextRecord)
{
#define FIXUPREG(reg, value) \
do { \
STRESS_LOG2(LF_GCROOTS, LL_INFO100, "Updating " #reg " %p to %p\n", \
pContextRecord->reg, \
(value)); \
pContextRecord->reg = (value); \
} while (0)
#ifdef _TARGET_X86_
size_t resumeSp = EECodeManager::GetResumeSp(pContextRecord);
FIXUPREG(Esp, resumeSp);
#endif // _TARGET_X86_
#undef FIXUPREG
}
MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);
#ifdef FEATURE_PAL
BOOL HandleHardwareException(PAL_SEHException* ex);
BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord);
#endif // FEATURE_PAL
static ExceptionTracker* GetTrackerMemory()
{
CONTRACTL
{
GC_TRIGGERS;
NOTHROW;
MODE_ANY;
}
CONTRACTL_END;
return g_theTrackerAllocator.GetTrackerMemory();
}
void FreeTrackerMemory(ExceptionTracker* pTracker, TrackerMemoryType mem)
{
CONTRACTL
{
GC_NOTRIGGER;
NOTHROW;
MODE_ANY;
}
CONTRACTL_END;
if (mem & memManaged)
{
pTracker->ReleaseResources();
}
if (mem & memUnmanaged)
{
g_theTrackerAllocator.FreeTrackerMemory(pTracker);
}
}
static inline void UpdatePerformanceMetrics(CrawlFrame *pcfThisFrame, BOOL bIsRethrownException, BOOL bIsNewException)
{
WRAPPER_NO_CONTRACT;
COUNTER_ONLY(GetPerfCounters().m_Excep.cThrown++);
// Fire an exception thrown ETW event when an exception occurs
ETW::ExceptionLog::ExceptionThrown(pcfThisFrame, bIsRethrownException, bIsNewException);
}
void ShutdownEEAndExitProcess()
{
ForceEEShutdown(SCA_ExitProcessWhenShutdownComplete);
}
void InitializeExceptionHandling()
{
EH_LOG((LL_INFO100, "InitializeExceptionHandling(): ExceptionTracker size: 0x%x bytes\n", sizeof(ExceptionTracker)));
InitSavedExceptionInfo();
CLRAddVectoredHandlers();
g_theTrackerAllocator.Init();
// Initialize the lock used for synchronizing access to the stacktrace in the exception object
g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);
#ifdef FEATURE_PAL
// Register handler of hardware exceptions like null reference in PAL
PAL_SetHardwareExceptionHandler(HandleHardwareException, IsSafeToHandleHardwareException);
// Register handler for determining whether the specified IP has code that is a GC marker for GCCover
PAL_SetGetGcMarkerExceptionCode(GetGcMarkerExceptionCode);
// Register handler for termination requests (e.g. SIGTERM)
PAL_SetTerminationRequestHandler(ShutdownEEAndExitProcess);
#endif // FEATURE_PAL
}
struct UpdateObjectRefInResumeContextCallbackState
{
UINT_PTR uResumeSP;
Frame *pHighestFrameWithRegisters;
TADDR uResumeFrameFP;
TADDR uICFCalleeSavedFP;
#ifdef _DEBUG
UINT nFrames;
bool fFound;
#endif
};
// Stack unwind callback for UpdateObjectRefInResumeContext().
StackWalkAction UpdateObjectRefInResumeContextCallback(CrawlFrame* pCF, LPVOID pData)
{
CONTRACTL
{
MODE_ANY;
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
UpdateObjectRefInResumeContextCallbackState *pState = (UpdateObjectRefInResumeContextCallbackState*)pData;
CONTEXT* pSrcContext = pCF->GetRegisterSet()->pCurrentContext;
INDEBUG(pState->nFrames++);
// Check to see if we have reached the resume frame.
if (pCF->IsFrameless())
{
// At this point, we are trying to find the managed frame containing the catch handler to be invoked.
// This is done by comparing the SP of the managed frame for which this callback was invoked with the
// SP the OS passed to our personality routine for the current managed frame. If they match, then we have
// reached the target frame.
//
// It is possible that a managed frame may execute a PInvoke after performing a stackalloc:
//
// 1) The ARM JIT will always inline the PInvoke in the managed frame, whether or not the frame
// contains EH. As a result, the ICF will live in the same frame which performs stackalloc.
//
// 2) JIT64 will only inline the PInvoke in the managed frame if the frame *does not* contain EH. If it does,
// then pinvoke will be performed via an ILStub and thus, stackalloc will be performed in a frame different
// from the one (ILStub) that contains the ICF.
//
// Thus, for the scenario where the catch handler lives in the frame that performed stackalloc, in case of
// ARM JIT, the SP returned by the OS will be the SP *after* the stackalloc has happened. However,
// the stackwalker will invoke this callback with the CrawlFrameSP that was initialized at the time ICF was setup, i.e.,
// it will be the SP after the prolog has executed (refer to InlinedCallFrame::UpdateRegDisplay).
//
// Thus, checking only the SP will not work for this scenario when using the ARM JIT.
//
// To address this case, the callback data also contains the frame pointer (FP) passed by the OS. This will
// be the value that is saved in the "CalleeSavedFP" field of the InlinedCallFrame during ICF
// initialization. When the stackwalker sees an ICF and invokes this callback, we copy the value of "CalleeSavedFP" in the data
// structure passed to this callback.
//
// Later, when the stackwalker invokes the callback for the managed frame containing the ICF, and the check
// for SP comaprison fails, we will compare the FP value we got from the ICF with the FP value the OS passed
// to us. If they match, then we have reached the resume frame.
//
// Note: This problem/scenario is not applicable to JIT64 since it does not perform pinvoke inlining if the
// method containing pinvoke also contains EH. Thus, the SP check will never fail for it.
if (pState->uResumeSP == GetSP(pSrcContext))
{
INDEBUG(pState->fFound = true);
return SWA_ABORT;
}
// Perform the FP check, as explained above.
if ((pState->uICFCalleeSavedFP !=0) && (pState->uICFCalleeSavedFP == pState->uResumeFrameFP))
{
// FP from ICF is the one that was also copied to the FP register in InlinedCallFrame::UpdateRegDisplay.
_ASSERTE(pState->uICFCalleeSavedFP == GetFP(pSrcContext));
INDEBUG(pState->fFound = true);
return SWA_ABORT;
}
// Reset the ICF FP in callback data
pState->uICFCalleeSavedFP = 0;
}
else
{
Frame *pFrame = pCF->GetFrame();
if (pFrame->NeedsUpdateRegDisplay())
{
CONSISTENCY_CHECK(pFrame >= pState->pHighestFrameWithRegisters);
pState->pHighestFrameWithRegisters = pFrame;
// Is this an InlinedCallFrame?
if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
{
// If we are here, then ICF is expected to be active.
_ASSERTE(InlinedCallFrame::FrameHasActiveCall(pFrame));
// Copy the CalleeSavedFP to the data structure that is passed this callback
// by the stackwalker. This is the value of frame pointer when ICF is setup
// in a managed frame.
//
// Setting this value here is based upon the assumption (which holds true on X64 and ARM) that
// the stackwalker invokes the callback for explicit frames before their
// container/corresponding managed frame.
pState->uICFCalleeSavedFP = ((PTR_InlinedCallFrame)pFrame)->GetCalleeSavedFP();
}
else
{
// For any other frame, simply reset uICFCalleeSavedFP field
pState->uICFCalleeSavedFP = 0;
}
}
}
return SWA_CONTINUE;
}
//
// Locates the locations of the nonvolatile registers. This will be used to
// retrieve the latest values of the object references before we resume
// execution from an exception.
//
//static
bool ExceptionTracker::FindNonvolatileRegisterPointers(Thread* pThread, UINT_PTR uOriginalSP, REGDISPLAY* pRegDisplay, TADDR uResumeFrameFP)
{
CONTRACTL
{
MODE_ANY;
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
//
// Find the highest frame below the resume frame that will update the
// REGDISPLAY. A normal StackWalkFrames will RtlVirtualUnwind through all
// managed frames on the stack, so this avoids some unnecessary work. The
// frame we find will have all of the nonvolatile registers/other state
// needed to start a managed unwind from that point.
//
Frame *pHighestFrameWithRegisters = NULL;
Frame *pFrame = pThread->GetFrame();
while ((UINT_PTR)pFrame < uOriginalSP)
{
if (pFrame->NeedsUpdateRegDisplay())
pHighestFrameWithRegisters = pFrame;
pFrame = pFrame->Next();
}
//
// Do a stack walk from this frame. This may find a higher frame within
// the resume frame (ex. inlined pinvoke frame). This will also update
// the REGDISPLAY pointers if any intervening managed frames saved
// nonvolatile registers.
//
UpdateObjectRefInResumeContextCallbackState state;
state.uResumeSP = uOriginalSP;
state.uResumeFrameFP = uResumeFrameFP;
state.uICFCalleeSavedFP = 0;
state.pHighestFrameWithRegisters = pHighestFrameWithRegisters;
INDEBUG(state.nFrames = 0);
INDEBUG(state.fFound = false);
pThread->StackWalkFramesEx(pRegDisplay, &UpdateObjectRefInResumeContextCallback, &state, 0, pHighestFrameWithRegisters);
// For managed exceptions, we should at least find a HelperMethodFrame (the one we put in IL_Throw()).
// For native exceptions such as AV's, we should at least find the FaultingExceptionFrame.
// If we don't find anything, then we must have hit an SO when we are trying to erect an HMF.
// Bail out in such situations.
//
// Note that pinvoke frames may be inlined in a managed method, so we cannot use the child SP (a.k.a. the current SP)
// to check for explicit frames "higher" on the stack ("higher" here means closer to the leaf frame). The stackwalker
// knows how to deal with inlined pinvoke frames, and it issues callbacks for them before issuing the callback for the
// containing managed method. So we have to do this check after we are done with the stackwalk.
pHighestFrameWithRegisters = state.pHighestFrameWithRegisters;
if (pHighestFrameWithRegisters == NULL)
{
return false;
}
CONSISTENCY_CHECK(state.nFrames);
CONSISTENCY_CHECK(state.fFound);
CONSISTENCY_CHECK(NULL != pHighestFrameWithRegisters);
//
// Now the REGDISPLAY has been unwound to the resume frame. The
// nonvolatile registers will either point into pHighestFrameWithRegisters,
// an inlined pinvoke frame, or into calling managed frames.
//
return true;
}
//static
void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDISPLAY *pRegDisplay, bool fAborting)
{
CONTEXT* pAbortContext = NULL;
if (fAborting)
{
pAbortContext = GetThread()->GetAbortContext();
}
#ifndef FEATURE_PAL
#define HANDLE_NULL_CONTEXT_POINTER _ASSERTE(false)
#else // FEATURE_PAL
#define HANDLE_NULL_CONTEXT_POINTER
#endif // FEATURE_PAL
#define UPDATEREG(reg) \
do { \
if (pRegDisplay->pCurrentContextPointers->reg != NULL) \
{ \
STRESS_LOG3(LF_GCROOTS, LL_INFO100, "Updating " #reg " %p to %p from %p\n", \
pContextRecord->reg, \
*pRegDisplay->pCurrentContextPointers->reg, \
pRegDisplay->pCurrentContextPointers->reg); \
pContextRecord->reg = *pRegDisplay->pCurrentContextPointers->reg; \
} \
else \
{ \
HANDLE_NULL_CONTEXT_POINTER; \
} \
if (pAbortContext) \
{ \
pAbortContext->reg = pContextRecord->reg; \
} \
} while (0)
#if defined(_TARGET_X86_)
UPDATEREG(Ebx);
UPDATEREG(Esi);
UPDATEREG(Edi);
UPDATEREG(Ebp);
#elif defined(_TARGET_AMD64_)
UPDATEREG(Rbx);
UPDATEREG(Rbp);
#ifndef UNIX_AMD64_ABI
UPDATEREG(Rsi);
UPDATEREG(Rdi);
#endif
UPDATEREG(R12);
UPDATEREG(R13);
UPDATEREG(R14);
UPDATEREG(R15);
#elif defined(_TARGET_ARM_)
UPDATEREG(R4);
UPDATEREG(R5);
UPDATEREG(R6);
UPDATEREG(R7);
UPDATEREG(R8);
UPDATEREG(R9);
UPDATEREG(R10);
UPDATEREG(R11);
#elif defined(_TARGET_ARM64_)
UPDATEREG(X19);
UPDATEREG(X20);
UPDATEREG(X21);
UPDATEREG(X22);
UPDATEREG(X23);
UPDATEREG(X24);
UPDATEREG(X25);
UPDATEREG(X26);
UPDATEREG(X27);
UPDATEREG(X28);
UPDATEREG(Fp);
#else
PORTABILITY_ASSERT("ExceptionTracker::UpdateNonvolatileRegisters");
#endif
#undef UPDATEREG
}
#ifndef _DEBUG
#define DebugLogExceptionRecord(pExceptionRecord)
#else // _DEBUG
#define LOG_FLAG(name) \
if (flags & name) \
{ \
LOG((LF_EH, LL_INFO100, "" #name " ")); \
} \
void DebugLogExceptionRecord(EXCEPTION_RECORD* pExceptionRecord)
{
ULONG flags = pExceptionRecord->ExceptionFlags;
EH_LOG((LL_INFO100, ">>exr: %p, code: %08x, addr: %p, flags: 0x%02x ", pExceptionRecord, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, flags));
LOG_FLAG(EXCEPTION_NONCONTINUABLE);
LOG_FLAG(EXCEPTION_UNWINDING);
LOG_FLAG(EXCEPTION_EXIT_UNWIND);
LOG_FLAG(EXCEPTION_STACK_INVALID);
LOG_FLAG(EXCEPTION_NESTED_CALL);
LOG_FLAG(EXCEPTION_TARGET_UNWIND);
LOG_FLAG(EXCEPTION_COLLIDED_UNWIND);
LOG((LF_EH, LL_INFO100, "\n"));
}
LPCSTR DebugGetExceptionDispositionName(EXCEPTION_DISPOSITION disp)
{
switch (disp)
{
case ExceptionContinueExecution: return "ExceptionContinueExecution";
case ExceptionContinueSearch: return "ExceptionContinueSearch";
case ExceptionNestedException: return "ExceptionNestedException";
case ExceptionCollidedUnwind: return "ExceptionCollidedUnwind";
default:
UNREACHABLE_MSG("Invalid EXCEPTION_DISPOSITION!");
}
}
#endif // _DEBUG
bool ExceptionTracker::IsStackOverflowException()
{
if (m_pThread->GetThrowableAsHandle() == g_pPreallocatedStackOverflowException)
{
return true;
}
return false;
}
UINT_PTR ExceptionTracker::CallCatchHandler(CONTEXT* pContextRecord, bool* pfAborting /*= NULL*/)
{
CONTRACTL
{
MODE_COOPERATIVE;
GC_TRIGGERS;
THROWS;
PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
}
CONTRACTL_END;
UINT_PTR uResumePC = 0;
ULONG_PTR ulRelOffset;
StackFrame sfStackFp = m_sfResumeStackFrame;
Thread* pThread = m_pThread;
MethodDesc* pMD = m_pMethodDescOfCatcher;
bool fIntercepted = false;
ThreadExceptionState* pExState = pThread->GetExceptionState();
#if defined(DEBUGGING_SUPPORTED)
// If the exception is intercepted, use the information stored in the DebuggerExState to resume the
// exception instead of calling the catch clause (there may not even be one).
if (pExState->GetFlags()->DebuggerInterceptInfo())
{
_ASSERTE(pMD != NULL);
// retrieve the interception information
pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&(sfStackFp.SP), &ulRelOffset, NULL);
PCODE pStartAddress = pMD->GetNativeCode();
EECodeInfo codeInfo(pStartAddress);
_ASSERTE(codeInfo.IsValid());
// Note that the value returned for ulRelOffset is actually the offset,
// so we need to adjust it to get the actual IP.
_ASSERTE(FitsIn<DWORD>(ulRelOffset));
uResumePC = codeInfo.GetJitManager()->GetCodeAddressForRelOffset(codeInfo.GetMethodToken(), static_cast<DWORD>(ulRelOffset));
// Either we haven't set m_uResumeStackFrame (for unhandled managed exceptions), or we have set it
// and it equals to MemoryStackFp.
_ASSERTE(m_sfResumeStackFrame.IsNull() || m_sfResumeStackFrame == sfStackFp);
fIntercepted = true;
}
#endif // DEBUGGING_SUPPORTED
_ASSERTE(!sfStackFp.IsNull());
m_sfResumeStackFrame.Clear();
m_pMethodDescOfCatcher = NULL;
_ASSERTE(pContextRecord);
//
// call the handler
//
EH_LOG((LL_INFO100, " calling catch at 0x%p\n", m_uCatchToCallPC));
// do not call the catch clause if the exception is intercepted
if (!fIntercepted)
{
_ASSERTE(m_uCatchToCallPC != 0 && m_pClauseForCatchToken != NULL);
uResumePC = CallHandler(m_uCatchToCallPC, sfStackFp, &m_ClauseForCatch, pMD, Catch X86_ARG(pContextRecord) ARM_ARG(pContextRecord) ARM64_ARG(pContextRecord));
}
else
{
// Since the exception has been intercepted and we could resuming execution at any
// user-specified arbitary location, reset the EH clause index and EstablisherFrame
// we may have saved for addressing any potential ThreadAbort raise.
//
// This is done since the saved EH clause index is related to the catch block executed,
// which does not happen in interception. As user specifies where we resume execution,
// we let that behaviour override the index and pretend as if we have no index available.
m_dwIndexClauseForCatch = 0;
m_sfEstablisherOfActualHandlerFrame.Clear();
m_sfCallerOfActualHandlerFrame.Clear();
}
EH_LOG((LL_INFO100, " resume address should be 0x%p\n", uResumePC));
//
// Our tracker may have gone away at this point, don't reference it.
//
return FinishSecondPass(pThread, uResumePC, sfStackFp, pContextRecord, this, pfAborting);
}
// static
UINT_PTR ExceptionTracker::FinishSecondPass(
Thread* pThread,
UINT_PTR uResumePC,
StackFrame sf,
CONTEXT* pContextRecord,
ExceptionTracker* pTracker,
bool* pfAborting /*= NULL*/)
{
CONTRACTL
{
MODE_COOPERATIVE;
GC_NOTRIGGER;
NOTHROW;
PRECONDITION(CheckPointer(pThread, NULL_NOT_OK));
PRECONDITION(CheckPointer((void*)uResumePC, NULL_NOT_OK));
PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
}
CONTRACTL_END;
// Between the time when we pop the ExceptionTracker for the current exception and the time
// when we actually resume execution, it is unsafe to start a funclet-skipping stackwalk.
// So we set a flag here to indicate that we are in this time window. The only user of this
// information right now is the profiler.
ThreadExceptionFlagHolder tefHolder(ThreadExceptionState::TEF_InconsistentExceptionState);
#ifdef DEBUGGING_SUPPORTED
// This must be done before we pop the trackers.
BOOL fIntercepted = pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo();
#endif // DEBUGGING_SUPPORTED
// Since we may [re]raise ThreadAbort post the catch block execution,
// save the index, and Establisher, of the EH clause corresponding to the handler
// we just executed before we release the tracker. This will be used to ensure that reraise
// proceeds forward and not get stuck in a loop. Refer to
// ExceptionTracker::ProcessManagedCallFrame for details.
DWORD ehClauseCurrentHandlerIndex = pTracker->GetCatchHandlerExceptionClauseIndex();
StackFrame sfEstablisherOfActualHandlerFrame = pTracker->GetEstablisherOfActualHandlingFrame();
EH_LOG((LL_INFO100, "second pass finished\n"));
EH_LOG((LL_INFO100, "cleaning up ExceptionTracker state\n"));
// Release the exception trackers till the current (specified) frame.
ExceptionTracker::PopTrackers(sf, true);
// This will set the last thrown to be either null if we have handled all the exceptions in the nested chain or
// to whatever the current exception is.
//
// In a case when we're nested inside another catch block, the domain in which we're executing may not be the
// same as the one the domain of the throwable that was just made the current throwable above. Therefore, we
// make a special effort to preserve the domain of the throwable as we update the the last thrown object.
//
// If an exception is active, we dont want to reset the LastThrownObject to NULL as the active exception
// might be represented by a tracker created in the second pass (refer to
// CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass to understand how exception trackers can be
// created in the 2nd pass on 64bit) that does not have a throwable attached to it. Thus, if this exception
// is caught in the VM and it attempts to get the LastThrownObject using GET_THROWABLE macro, then it should be available.
//
// But, if the active exception tracker remains consistent in the 2nd pass (which will happen if the exception is caught
// in managed code), then the call to SafeUpdateLastThrownObject below will automatically update the LTO as per the
// active exception.
if (!pThread->GetExceptionState()->IsExceptionInProgress())
{
pThread->SafeSetLastThrownObject(NULL);
}
// Sync managed exception state, for the managed thread, based upon any active exception tracker
pThread->SyncManagedExceptionState(false);
//
// If we are aborting, we should not resume execution. Instead, we raise another
// exception. However, we do this by resuming execution at our thread redirecter
// function (RedirectForThrowControl), which is the same process we use for async
// thread stops. This redirecter function will cover the stack frame and register
// stack frame and then throw an exception. When we first see the exception thrown
// by this redirecter, we fixup the context for the thread stackwalk by copying
// pThread->m_OSContext into the dispatcher context and restarting the exception
// dispatch. As a result, we need to save off the "correct" resume context before
// we resume so the exception processing can work properly after redirect. A side
// benefit of this mechanism is that it makes synchronous and async thread abort
// use exactly the same codepaths.
//
UINT_PTR uAbortAddr = 0;
#if defined(DEBUGGING_SUPPORTED)
// Don't honour thread abort requests at this time for intercepted exceptions.
if (fIntercepted)
{
uAbortAddr = 0;
}
else
#endif // !DEBUGGING_SUPPORTED
{
CopyOSContext(pThread->m_OSContext, pContextRecord);
SetIP(pThread->m_OSContext, (PCODE)uResumePC);
uAbortAddr = (UINT_PTR)COMPlusCheckForAbort(uResumePC);
}
if (uAbortAddr)
{
if (pfAborting != NULL)
{
*pfAborting = true;
}
EH_LOG((LL_INFO100, "thread abort in progress, resuming thread under control...\n"));
// We are aborting, so keep the reference to the current EH clause index.
// We will use this when the exception is reraised and we begin commencing
// exception dispatch. This is done in ExceptionTracker::ProcessOSExceptionNotification.
//
// The "if" condition below can be false if the exception has been intercepted (refer to
// ExceptionTracker::CallCatchHandler for details)
if ((ehClauseCurrentHandlerIndex > 0) && (!sfEstablisherOfActualHandlerFrame.IsNull()))
{
pThread->m_dwIndexClauseForCatch = ehClauseCurrentHandlerIndex;
pThread->m_sfEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame;
}
CONSISTENCY_CHECK(CheckPointer(pContextRecord));
STRESS_LOG1(LF_EH, LL_INFO10, "resume under control: ip: %p\n", uResumePC);
#ifdef _TARGET_AMD64_
pContextRecord->Rcx = uResumePC;
#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
// On ARM & ARM64, we save off the original PC in Lr. This is the same as done
// in HandleManagedFault for H/W generated exceptions.
pContextRecord->Lr = uResumePC;
#endif
uResumePC = uAbortAddr;
}
CONSISTENCY_CHECK(pThread->DetermineIfGuardPagePresent());
EH_LOG((LL_INFO100, "FinishSecondPass complete, uResumePC = %p, current SP = %p\n", uResumePC, GetCurrentSP()));
return uResumePC;
}
// On CoreARM, the MemoryStackFp is ULONG when passed by RtlDispatchException,
// unlike its 64bit counterparts.
EXTERN_C EXCEPTION_DISPOSITION
ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord
WIN64_ARG(IN ULONG64 MemoryStackFp)
NOT_WIN64_ARG(IN ULONG MemoryStackFp),
IN OUT PCONTEXT pContextRecord,
IN OUT PDISPATCHER_CONTEXT pDispatcherContext
)
{
//
// This method doesn't always return, so it will leave its
// state on the thread if using dynamic contracts.
//
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_THROWS;
// We must preserve this so that GCStress=4 eh processing doesnt kill last error.
DWORD dwLastError = GetLastError();
EXCEPTION_DISPOSITION returnDisposition = ExceptionContinueSearch;
STRESS_LOG5(LF_EH, LL_INFO10, "Processing exception at establisher=%p, ip=%p disp->cxr: %p, sp: %p, cxr @ exception: %p\n",
MemoryStackFp, pDispatcherContext->ControlPc,
pDispatcherContext->ContextRecord,
GetSP(pDispatcherContext->ContextRecord), pContextRecord);
AMD64_ONLY(STRESS_LOG3(LF_EH, LL_INFO10, " rbx=%p, rsi=%p, rdi=%p\n", pContextRecord->Rbx, pContextRecord->Rsi, pContextRecord->Rdi));
// sample flags early on because we may change pExceptionRecord below
// if we are seeing a STATUS_UNWIND_CONSOLIDATE
DWORD dwExceptionFlags = pExceptionRecord->ExceptionFlags;
Thread* pThread = GetThread();
// Stack Overflow is handled specially by the CLR EH mechanism. In fact
// there are cases where we aren't in managed code, but aren't quite in
// known unmanaged code yet either...
//
// These "boundary code" cases include:
// - in JIT helper methods which don't have a frame
// - in JIT helper methods before/during frame setup
// - in FCALL before/during frame setup
//
// In those cases on x86 we take special care to start our unwind looking
// for a handler which is below the last explicit frame which has been
// established on the stack as it can't reliably crawl the stack frames
// above that.
// NOTE: see code in the CLRVectoredExceptionHandler() routine.
//
// From the perspective of the EH subsystem, we can handle unwind correctly
// even without erecting a transition frame on WIN64. However, since the GC
// uses the stackwalker to update object references, and since the stackwalker
// relies on transition frame, we still cannot let an exception be handled
// by an unprotected managed frame.
//
// This code below checks to see if a SO has occurred outside of managed code.
// If it has, and if we don't have a transition frame higher up the stack, then
// we don't handle the SO.
if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
{
if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
{
// We don't need to unwind the frame chain here because we have backstop
// personality routines at the U2M boundary to handle do that. They are
// the personality routines of CallDescrWorker() and UMThunkStubCommon().
//
// See VSW 471619 for more information.
// We should be in cooperative mode if we are going to handle the SO.
// We track SO state for the thread.
EEPolicy::HandleStackOverflow(SOD_ManagedFrameHandler, (void*)MemoryStackFp);
FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
return ExceptionContinueSearch;
}
else
{
#ifdef FEATURE_STACK_PROBE
if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
{
RetailStackProbe(static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)), pThread);
}
#endif
}
}
else
{
DWORD exceptionCode = pExceptionRecord->ExceptionCode;
if (exceptionCode == STATUS_UNWIND)
// If exceptionCode is STATUS_UNWIND, RtlUnwind is called with a NULL ExceptionRecord,
// therefore OS uses a faked ExceptionRecord with STATUS_UNWIND code. Then we need to
// look at our saved exception code.
exceptionCode = GetCurrentExceptionCode();
if (IsSOExceptionCode(exceptionCode))
{
return ExceptionContinueSearch;
}
}
BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
StackFrame sf((UINT_PTR)MemoryStackFp);
{
GCX_COOP();
// Update the current establisher frame
if (dwExceptionFlags & EXCEPTION_UNWINDING)
{
ExceptionTracker *pCurrentTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker();
if (pCurrentTracker != NULL)
{
pCurrentTracker->SetCurrentEstablisherFrame(sf);
}
}
#ifdef _DEBUG
Thread::ObjectRefFlush(pThread);
#endif // _DEBUG
}
//
// begin Early Processing
//
{
#ifndef USE_REDIRECT_FOR_GCSTRESS
if (IsGcMarker(pContextRecord, pExceptionRecord))
{
returnDisposition = ExceptionContinueExecution;
goto lExit;
}
#endif // !USE_REDIRECT_FOR_GCSTRESS
EH_LOG((LL_INFO100, "..................................................................................\n"));
EH_LOG((LL_INFO100, "ProcessCLRException enter, sp = 0x%p, ControlPc = 0x%p\n", MemoryStackFp, pDispatcherContext->ControlPc));
DebugLogExceptionRecord(pExceptionRecord);
if (STATUS_UNWIND_CONSOLIDATE == pExceptionRecord->ExceptionCode)
{
EH_LOG((LL_INFO100, "STATUS_UNWIND_CONSOLIDATE, retrieving stored exception record\n"));
_ASSERTE(pExceptionRecord->NumberParameters >= 7);
pExceptionRecord = (EXCEPTION_RECORD*)pExceptionRecord->ExceptionInformation[6];
DebugLogExceptionRecord(pExceptionRecord);
}
CONSISTENCY_CHECK_MSG(!DebugIsEECxxException(pExceptionRecord), "EE C++ Exception leaked into managed code!!\n");
}
//
// end Early Processing (tm) -- we're now into really processing an exception for managed code
//
if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
{
// If the exception is a breakpoint, but outside of the runtime or managed code,
// let it go. It is not ours, so someone else will handle it, or we'll see
// it again as an unhandled exception.
if ((pExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
(pExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP))
{
// It is a breakpoint; is it from the runtime or managed code?
PCODE ip = GetIP(pContextRecord); // IP of the fault.
BOOL fExternalException = FALSE;
BEGIN_SO_INTOLERANT_CODE_NOPROBE;
fExternalException = (!ExecutionManager::IsManagedCode(ip) &&
!IsIPInModule(g_pMSCorEE, ip));
END_SO_INTOLERANT_CODE_NOPROBE;
if (fExternalException)
{
// The breakpoint was not ours. Someone else can handle it. (Or if not, we'll get it again as
// an unhandled exception.)
returnDisposition = ExceptionContinueSearch;
goto lExit;
}
}
}
{
BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord);
// we already fixed the context in HijackHandler, so let's
// just clear the thread state.
pThread->ResetThrowControlForThread();
ExceptionTracker::StackTraceState STState;
ExceptionTracker* pTracker = ExceptionTracker::GetOrCreateTracker(
pDispatcherContext->ControlPc,
sf,
pExceptionRecord,
pContextRecord,
bAsynchronousThreadStop,
!(dwExceptionFlags & EXCEPTION_UNWINDING),
&STState);
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
// Only setup the Corruption Severity in the first pass
if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
{
// Switch to COOP mode
GCX_COOP();
if (pTracker && pTracker->GetThrowable() != NULL)
{
// Setup the state in current exception tracker indicating the corruption severity
// of the active exception.
CEHelper::SetupCorruptionSeverityForActiveException((STState == ExceptionTracker::STS_FirstRethrowFrame), (pTracker->GetPreviousExceptionTracker() != NULL),
CEHelper::ShouldTreatActiveExceptionAsNonCorrupting());
}
// Failfast if exception indicates corrupted process state
if (pTracker->GetCorruptionSeverity() == ProcessCorrupting)
EEPOLICY_HANDLE_FATAL_ERROR(pExceptionRecord->ExceptionCode);
}
#endif // FEATURE_CORRUPTING_EXCEPTIONS
{
// Switch to COOP mode since we are going to work
// with throwable
GCX_COOP();
if (pTracker->GetThrowable() != NULL)
{
BOOL fIsThrownExceptionAV = FALSE;
OBJECTREF oThrowable = NULL;
GCPROTECT_BEGIN(oThrowable);
oThrowable = pTracker->GetThrowable();
// Check if we are dealing with AV or not and if we are,
// ensure that this is a real AV and not managed AV exception
if ((pExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
(MscorlibBinder::GetException(kAccessViolationException) == oThrowable->GetMethodTable()))
{
// Its an AV - set the flag
fIsThrownExceptionAV = TRUE;
}
GCPROTECT_END();
// Did we get an AV?
if (fIsThrownExceptionAV == TRUE)
{
// Get the escalation policy action for handling AV
EPolicyAction actionAV = GetEEPolicy()->GetActionOnFailure(FAIL_AccessViolation);
// Valid actions are: eNoAction (default behavior) or eRudeExitProcess
_ASSERTE(((actionAV == eNoAction) || (actionAV == eRudeExitProcess)));
if (actionAV == eRudeExitProcess)
{
LOG((LF_EH, LL_INFO100, "ProcessCLRException: AccessViolation handler found and doing RudeExitProcess due to escalation policy (eRudeExitProcess)\n"));
// EEPolicy::HandleFatalError will help us RudeExit the process.
// RudeExitProcess due to AV is to prevent a security risk - we are ripping
// at the boundary, without looking for the handlers.
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
}
}
}
}
#ifndef FEATURE_PAL // Watson is on Windows only
// Setup bucketing details for nested exceptions (rethrow and non-rethrow) only if we are in the first pass
if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
{
ExceptionTracker *pPrevEHTracker = pTracker->GetPreviousExceptionTracker();
if (pPrevEHTracker != NULL)
{
SetStateForWatsonBucketing((STState == ExceptionTracker::STS_FirstRethrowFrame), pPrevEHTracker->GetThrowableAsHandle());
}
}
#endif //!FEATURE_PAL
CLRUnwindStatus status;
#ifdef USE_PER_FRAME_PINVOKE_INIT
// Refer to comment in ProcessOSExceptionNotification about ICF and codegen difference.
InlinedCallFrame *pICFSetAsLimitFrame = NULL;
#endif // USE_PER_FRAME_PINVOKE_INIT
status = pTracker->ProcessOSExceptionNotification(
pExceptionRecord,
pContextRecord,
pDispatcherContext,
dwExceptionFlags,
sf,
pThread,
STState
#ifdef USE_PER_FRAME_PINVOKE_INIT
, (PVOID)pICFSetAsLimitFrame
#endif // USE_PER_FRAME_PINVOKE_INIT
);
if (FirstPassComplete == status)
{
EH_LOG((LL_INFO100, "first pass finished, found handler, TargetFrameSp = %p\n",
pDispatcherContext->EstablisherFrame));
SetLastError(dwLastError);
#ifndef FEATURE_PAL
//
// At this point (the end of the 1st pass) we don't know where
// we are going to resume to. So, we pass in an address, which
// lies in NULL pointer partition of the memory, as the target IP.
//
// Once we reach the target frame in the second pass unwind, we call
// the catch funclet that caused us to resume execution and it
// tells us where we are resuming to. At that point, we patch
// the context record with the resume IP and RtlUnwind2 finishes
// by restoring our context at the right spot.
//
// If we are unable to set the resume PC for some reason, then
// the OS will try to resume at the NULL partition address and the
// attempt will fail due to AV, resulting in failfast, helping us
// isolate problems in patching the IP.
ClrUnwindEx(pExceptionRecord,
(UINT_PTR)pThread,
INVALID_RESUME_ADDRESS,
pDispatcherContext->EstablisherFrame);
UNREACHABLE();
//
// doesn't return
//
#else
// On Unix, we will return ExceptionStackUnwind back to the custom
// exception dispatch system. When it sees this disposition, it will
// know that we want to handle the exception and will commence unwind
// via the custom unwinder.
return ExceptionStackUnwind;
#endif // FEATURE_PAL
}
else if (SecondPassComplete == status)
{
bool fAborting = false;
UINT_PTR uResumePC = (UINT_PTR)-1;
UINT_PTR uOriginalSP = GetSP(pContextRecord);
Frame* pLimitFrame = pTracker->GetLimitFrame();
pDispatcherContext->ContextRecord = pContextRecord;
// We may be in COOP mode at this point - the indefinite switch was done
// in ExceptionTracker::ProcessManagedCallFrame.
//
// However, if a finally was invoked non-exceptionally and raised an exception
// that was caught in its parent method, unwind will result in invoking any applicable termination
// handlers in the finally funclet and thus, also switching the mode to COOP indefinitely.
//
// Since the catch block to be executed will lie in the parent method,
// we will skip frames till we reach the parent and in the process, switch back to PREEMP mode
// as control goes back to the OS.
//
// Upon reaching the target of unwind, we won't call ExceptionTracker::ProcessManagedCallFrame (since any
// handlers in finally or surrounding it will be invoked when we unwind finally funclet). Thus,
// we may not be in COOP mode.
//
// Since CallCatchHandler expects to be in COOP mode, perform the switch here.
GCX_COOP_NO_DTOR();
uResumePC = pTracker->CallCatchHandler(pContextRecord, &fAborting);
{
//
// GC must NOT occur after the handler has returned until
// we resume at the new address because the stackwalker
// EnumGcRefs would try and report things as live from the
// try body, that were probably reported dead from the
// handler body.
//
// GC must NOT occur once the frames have been popped because
// the values in the unwound CONTEXT are not GC-protected.
//
GCX_FORBID();
CONSISTENCY_CHECK((UINT_PTR)-1 != uResumePC);
// Ensure we are not resuming to the invalid target IP we had set at the end of
// first pass
_ASSERTE_MSG(INVALID_RESUME_ADDRESS != uResumePC, "CallCatchHandler returned invalid resume PC!");
//
// CallCatchHandler freed the tracker.
//
INDEBUG(pTracker = (ExceptionTracker*)POISONC);
// Note that we should only fail to fix up for SO.
bool fFixedUp = FixNonvolatileRegisters(uOriginalSP, pThread, pContextRecord, fAborting);
_ASSERTE(fFixedUp || (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW));
CONSISTENCY_CHECK(pLimitFrame > dac_cast<PTR_VOID>(GetSP(pContextRecord)));
#ifdef USE_PER_FRAME_PINVOKE_INIT
if (pICFSetAsLimitFrame != NULL)
{
_ASSERTE(pICFSetAsLimitFrame == pLimitFrame);
// Mark the ICF as inactive (by setting the return address as NULL).
// It will be marked as active at the next PInvoke callsite.
//
// This ensures that any stackwalk post the catch handler but before
// the next pinvoke callsite does not see the frame as active.
pICFSetAsLimitFrame->Reset();
}
#endif // USE_PER_FRAME_PINVOKE_INIT
pThread->SetFrame(pLimitFrame);
FixContext(pContextRecord);
SetIP(pContextRecord, (PCODE)uResumePC);
}
#ifdef STACK_GUARDS_DEBUG
// We are transitioning back to managed code, so ensure that we are in
// SO-tolerant mode before we do so.
RestoreSOToleranceState();
#endif
RESET_CONTRACT_VIOLATION();
ExceptionTracker::ResumeExecution(pContextRecord,
NULL
);
UNREACHABLE();
}
}
lExit: ;
EH_LOG((LL_INFO100, "returning %s\n", DebugGetExceptionDispositionName(returnDisposition)));
CONSISTENCY_CHECK( !((dwExceptionFlags & EXCEPTION_TARGET_UNWIND) && (ExceptionContinueSearch == returnDisposition)));
if ((ExceptionContinueSearch == returnDisposition))
{
GCX_PREEMP_NO_DTOR();
}
END_CONTRACT_VIOLATION;
SetLastError(dwLastError);
return returnDisposition;
}
// When we hit a native exception such as an AV in managed code, we put up a FaultingExceptionFrame which saves all the
// non-volatile registers. The GC may update these registers if they contain object references. However, the CONTEXT
// with which we are going to resume execution doesn't have these updated values. Thus, we need to fix up the non-volatile
// registers in the CONTEXT with the updated ones stored in the FaultingExceptionFrame. To do so properly, we need
// to perform a full stackwalk.
bool FixNonvolatileRegisters(UINT_PTR  uOriginalSP,
                             Thread*   pThread,
                             CONTEXT*  pContextRecord,
                             bool      fAborting
                             )
{
    CONTRACTL
    {
        MODE_COOPERATIVE;
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // Scratch context used only to satisfy FillRegDisplay; the interesting
    // state lives in the register pointers of the REGDISPLAY below.
    CONTEXT ctxScratch = {0};

    // The REGDISPLAY ctor will initialize its fields to NULL.
    REGDISPLAY rd;
    pThread->FillRegDisplay(&rd, &ctxScratch);

    // Walk the stack to locate pointers to the saved non-volatile registers.
    // Note that we should only fail to find them for SO.
    if (!ExceptionTracker::FindNonvolatileRegisterPointers(pThread, uOriginalSP, &rd, GetFP(pContextRecord)))
    {
        return false;
    }

    {
        //
        // GC must NOT occur once the frames have been popped because
        // the values in the unwound CONTEXT are not GC-protected.
        //
        GCX_FORBID();

        // Copy the (possibly GC-updated) saved register values into the
        // CONTEXT we are about to resume with.
        ExceptionTracker::UpdateNonvolatileRegisters(pContextRecord, &rd, fAborting);
    }

    return true;
}
// static
//
// Populate a CrawlFrame for an explicit (non-JITted) Frame on the Frame chain.
void ExceptionTracker::InitializeCrawlFrameForExplicitFrame(CrawlFrame* pcfThisFrame, Frame* pFrame, MethodDesc *pMD)
{
    CONTRACTL
    {
        MODE_ANY;
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(pFrame != FRAME_TOP);
    }
    CONTRACTL_END;

    // Poison the CrawlFrame in debug builds so stale fields are obvious.
    INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));

    // Explicit frames are, by definition, not frameless.
    pcfThisFrame->isFrameless = false;
    pcfThisFrame->pFrame = pFrame;
    pcfThisFrame->pFunc = pFrame->GetFunction();

    // Inactive ICFs in IL stubs contain the true interop MethodDesc which must be
    // reported in the stack trace; substitute it for the stub's MethodDesc.
    // (Short-circuit evaluation preserves the original check order: vtable,
    // active-call, then IL-stub queries on pMD.)
    if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr() &&
        !InlinedCallFrame::FrameHasActiveCall(pFrame) &&
        pMD->IsILStub() && pMD->AsDynamicMethodDesc()->HasMDContextArg())
    {
        // Report interop MethodDesc
        pcfThisFrame->pFunc = ((InlinedCallFrame *)pFrame)->GetActualInteropMethodDesc();
        _ASSERTE(pcfThisFrame->pFunc != NULL);
        _ASSERTE(pcfThisFrame->pFunc->SanityCheck());
    }

    pcfThisFrame->pFirstGSCookie = NULL;
    pcfThisFrame->pCurGSCookie = NULL;
}
// This method will initialize the RegDisplay in the CrawlFrame with the correct state for current and caller context
// See the long description of contexts and their validity in ExceptionTracker::InitializeCrawlFrame for details.
//
// Only the first pass sets anything up here: in the first pass DispatcherContext->ContextRecord
// is the *caller's* context (see the <AMD64> comment in InitializeCrawlFrame), while the current
// frame is identified by the establisher frame SP and DispatcherContext->ControlPc.
void ExceptionTracker::InitializeCurrentContextForCrawlFrame(CrawlFrame* pcfThisFrame, PT_DISPATCHER_CONTEXT pDispatcherContext, StackFrame sfEstablisherFrame)
{
CONTRACTL
{
MODE_ANY;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(IsInFirstPass());
}
CONTRACTL_END;
if (IsInFirstPass())
{
REGDISPLAY *pRD = pcfThisFrame->pRD;
#ifndef USE_CURRENT_CONTEXT_IN_FILTER
// No usable current-frame context on this target: poison it in debug builds...
INDEBUG(memset(pRD->pCurrentContext, 0xCC, sizeof(*(pRD->pCurrentContext))));
// Ensure that clients can tell the current context isn't valid.
SetIP(pRD->pCurrentContext, 0);
#else // !USE_CURRENT_CONTEXT_IN_FILTER
// Targets that support filters seeing the current context (x86): rebuild the
// current context and its register pointers from the non-volatile context the
// dispatcher captured for this frame.
RestoreNonvolatileRegisters(pRD->pCurrentContext, pDispatcherContext->CurrentNonVolatileContextRecord);
RestoreNonvolatileRegisterPointers(pRD->pCurrentContextPointers, pDispatcherContext->CurrentNonVolatileContextRecord);
#endif // USE_CURRENT_CONTEXT_IN_FILTER
// First pass: DispatcherContext->ContextRecord holds the caller's context.
*(pRD->pCallerContext) = *(pDispatcherContext->ContextRecord);
pRD->IsCallerContextValid = TRUE;
// SP/ControlPC of the *current* frame come from the establisher frame and the
// PC for which the personality routine was invoked.
pRD->SP = sfEstablisherFrame.SP;
pRD->ControlPC = pDispatcherContext->ControlPc;
#ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
// On these targets the establisher frame *is* the caller SP, so it is valid already.
pcfThisFrame->pRD->IsCallerSPValid = TRUE;
// Assert our first pass assumptions for the Arm/Arm64
_ASSERTE(sfEstablisherFrame.SP == GetSP(pDispatcherContext->ContextRecord));
#endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
}
EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCurrentContextForCrawlFrame: DispatcherContext->ControlPC = %p; IP in DispatcherContext->ContextRecord = %p.\n",
pDispatcherContext->ControlPc, GetIP(pDispatcherContext->ContextRecord)));
}
// static
//
// Build a CrawlFrame for the managed frame for which the personality routine was
// invoked, fixing up the RegDisplay SP/ControlPC per pass and per target, and
// resolving the code info / MethodDesc from the (possibly adjusted) control PC.
void ExceptionTracker::InitializeCrawlFrame(CrawlFrame* pcfThisFrame, Thread* pThread, StackFrame sf, REGDISPLAY* pRD,
PDISPATCHER_CONTEXT pDispatcherContext, DWORD_PTR ControlPCForEHSearch,
UINT_PTR* puMethodStartPC,
ExceptionTracker *pCurrentTracker)
{
CONTRACTL
{
MODE_ANY;
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
// Poison the CrawlFrame in debug builds so stale fields are obvious.
INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));
pcfThisFrame->pRD = pRD;
#ifdef FEATURE_INTERPRETER
pcfThisFrame->pFrame = NULL;
#endif // FEATURE_INTERPRETER
// Initialize the RegDisplay from DC->ContextRecord. DC->ControlPC always contains the IP
// in the frame for which the personality routine was invoked.
//
// <AMD64>
//
// During 1st pass, DC->ContextRecord contains the context of the caller of the frame for which personality
// routine was invoked. On the other hand, in the 2nd pass, it contains the context of the frame for which
// personality routine was invoked.
//
// </AMD64>
//
// <ARM and ARM64>
//
// In the first pass on ARM & ARM64:
//
// 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time
// the current managed method was invoked and thus, is the SP of the caller. This is
// the value of DispatcherContext->EstablisherFrame as well.
// 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality
// routine has been invoked.
// 3) DispatcherContext->ContextRecord contains the context record of the caller (and thus, IP
// in the caller). Most of the times, these values will be distinct. However, recursion
// may result in them being the same (case "run2" of baseservices\Regression\V1\Threads\functional\CS_TryFinally.exe
// is an example). In such a case, we ensure that EstablisherFrame value is the same as
// the SP in DispatcherContext->ContextRecord (which is (1) above).
//
// In second pass on ARM & ARM64:
//
// 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time
// the current managed method was invoked and thus, is the SP of the caller. This is
// the value of DispatcherContext->EstablisherFrame as well.
// 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality
// routine has been invoked.
// 3) DispatcherContext->ContextRecord contains the context record of the current managed method
// for which the personality routine is invoked.
//
// </ARM and ARM64>
pThread->InitRegDisplay(pcfThisFrame->pRD, pDispatcherContext->ContextRecord, true);
bool fAdjustRegdisplayControlPC = false;
// The "if" check below is trying to determine when we have a valid current context in DC->ContextRecord and whether, or not,
// RegDisplay needs to be fixed up to set SP and ControlPC to have the values for the current frame for which personality routine
// is invoked.
//
// We do this based upon the current pass for the exception tracker as this will also handle the case when current frame
// and its caller have the same return address (i.e. ControlPc). This can happen in cases when, due to certain JIT optimizations, the following callstack
//
// A -> B -> A -> C
//
// Could get transformed to the one below when B is inlined in the first (left-most) A resulting in:
//
// A -> A -> C
//
// In this case, during 1st pass, when personality routine is invoked for the second A, DC->ControlPc could have the same
// value as DC->ContextRecord->Rip even though the DC->ContextRecord actually represents caller context (of first A).
// As a result, we will not initialize the value of SP and controlPC in RegDisplay for the current frame (frame for
// which personality routine was invoked - second A in the optimized scenario above) resulting in frame specific lookup (e.g.
// GenericArgType) to happen incorrectly (against first A).
//
// Thus, we should always use the pass identification in ExceptionTracker to determine when we need to perform the fixup below.
if (pCurrentTracker->IsInFirstPass())
{
pCurrentTracker->InitializeCurrentContextForCrawlFrame(pcfThisFrame, pDispatcherContext, sf);
}
else
{
#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
// See the comment above the call to InitRegDisplay for this assertion.
_ASSERTE(pDispatcherContext->ControlPc == GetIP(pDispatcherContext->ContextRecord));
#endif // _TARGET_ARM_ || _TARGET_ARM64_
#ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
// Simply setup the callerSP during the second pass in the caller context.
// This is used in setting up the "EnclosingClauseCallerSP" in ExceptionTracker::ProcessManagedCallFrame
// when the termination handlers are invoked.
::SetSP(pcfThisFrame->pRD->pCallerContext, sf.SP);
pcfThisFrame->pRD->IsCallerSPValid = TRUE;
#endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
}
#ifdef ADJUST_PC_UNWOUND_TO_CALL
// Further below, we will adjust the ControlPC based upon whether we are at a callsite or not.
// We need to do this for "RegDisplay.ControlPC" field as well so that when data structures like
// EECodeInfo initialize themselves using this field, they will have the correct absolute value
// that is in sync with the "relOffset" we calculate below.
//
// However, we do this *only* when "ControlPCForEHSearch" is the same as "DispatcherContext->ControlPC",
// indicating we are not using the thread-abort reraise loop prevention logic.
//
if (pDispatcherContext->ControlPc == ControlPCForEHSearch)
{
// Since DispatcherContext->ControlPc is used to initialize the
// RegDisplay.ControlPC field, assert that it is the same
// as the ControlPC we are going to use to initialize the CrawlFrame
// with as well.
_ASSERTE(pcfThisFrame->pRD->ControlPC == ControlPCForEHSearch);
fAdjustRegdisplayControlPC = true;
}
#endif // ADJUST_PC_UNWOUND_TO_CALL
#if defined(_TARGET_ARM_)
// Remove the Thumb bit
ControlPCForEHSearch = ThumbCodeToDataPointer<DWORD_PTR, DWORD_PTR>(ControlPCForEHSearch);
#endif
#ifdef ADJUST_PC_UNWOUND_TO_CALL
// If the OS indicated that the IP is a callsite, then adjust the ControlPC by decrementing it
// by two. This is done because unwinding at callsite will make ControlPC point to the
// instruction post the callsite. If a protected region ends "at" the callsite, then
// not doing this adjustment will result in a one-off error that can result in us not finding
// a handler.
//
// For async exceptions (e.g. AV), this will be false.
//
// We decrement by two to be in accordance with how the kernel does as well.
if (pDispatcherContext->ControlPcIsUnwound)
{
ControlPCForEHSearch -= STACKWALK_CONTROLPC_ADJUST_OFFSET;
if (fAdjustRegdisplayControlPC == true)
{
// Once the check above is removed, the assignment below should
// be done unconditionally.
pcfThisFrame->pRD->ControlPC = ControlPCForEHSearch;
// On ARM & ARM64, the IP is either at the callsite (post the adjustment above)
// or at the instruction at which async exception took place.
pcfThisFrame->isIPadjusted = true;
}
}
#endif // ADJUST_PC_UNWOUND_TO_CALL
// Resolve the (possibly adjusted) PC to JITted code; a valid result means this
// is a frameless (JITted) frame with a real MethodDesc.
pcfThisFrame->codeInfo.Init(ControlPCForEHSearch);
if (pcfThisFrame->codeInfo.IsValid())
{
pcfThisFrame->isFrameless = true;
pcfThisFrame->pFunc = pcfThisFrame->codeInfo.GetMethodDesc();
*puMethodStartPC = pcfThisFrame->codeInfo.GetStartAddress();
}
else
{
pcfThisFrame->isFrameless = false;
pcfThisFrame->pFunc = NULL;
*puMethodStartPC = NULL;
}
pcfThisFrame->pThread = pThread;
pcfThisFrame->hasFaulted = false;
Frame* pTopFrame = pThread->GetFrame();
// The IP is considered adjusted unless the topmost Frame is a FaultingExceptionFrame
// (i.e. a hardware fault is at the top of the Frame chain).
// NOTE(review): this unconditionally recomputes isIPadjusted, overriding the
// conditional "isIPadjusted = true" set in the ADJUST_PC_UNWOUND_TO_CALL block
// above — confirm this ordering is intended.
pcfThisFrame->isIPadjusted = (FRAME_TOP != pTopFrame) && (pTopFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr());
if (pcfThisFrame->isFrameless && (pcfThisFrame->isIPadjusted == false) && (pcfThisFrame->GetRelOffset() == 0))
{
// If we are here, then either a hardware generated exception happened at the first instruction
// of a managed method, or an exception was thrown at that location.
//
// Adjusting IP in such a case will lead us into unknown code - it could be native code or some
// other JITted code.
//
// Hence, we will flag that the IP is already adjusted.
pcfThisFrame->isIPadjusted = true;
EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCrawlFrame: Exception at offset zero of the method (MethodDesc %p); setting IP as adjusted.\n",
pcfThisFrame->pFunc));
}
pcfThisFrame->pFirstGSCookie = NULL;
pcfThisFrame->pCurGSCookie = NULL;
pcfThisFrame->isFilterFuncletCached = FALSE;
}
// Collapse any trackers escaped by a nested exception, then fold the given
// frame into the scanned stack range. Returns true if the frame still needs
// to be processed, false if it has already been seen and can be skipped.
bool ExceptionTracker::UpdateScannedStackRange(StackFrame sf, bool fIsFirstPass)
{
    CONTRACTL
    {
        // Since this function will modify the scanned stack range, which is also accessed during the GC stackwalk,
        // we invoke it in COOP mode so that that access to the range is synchronized.
        MODE_COOPERATIVE;
        GC_TRIGGERS;
        THROWS;
    }
    CONTRACTL_END;

    //
    // collapse trackers if a nested exception passes a previous exception
    //
    HandleNestedExceptionEscape(sf, fIsFirstPass);

    if (!m_ScannedStackRange.Contains(sf))
    {
        // New territory: grow whichever bound the frame falls outside of.
        if (sf < m_ScannedStackRange.GetLowerBound())
        {
            m_ScannedStackRange.ExtendLowerBound(sf);
        }
        if (sf > m_ScannedStackRange.GetUpperBound())
        {
            m_ScannedStackRange.ExtendUpperBound(sf);
        }
        DebugLogTrackerRanges(" C");
        return true;
    }

    // If we're unwinding to find the resume frame and we're examining the topmost previously scanned frame,
    // then we can't ignore it because we could resume here due to an escaped nested exception.
    if (m_ExceptionFlags.UnwindingToFindResumeFrame() && (m_ScannedStackRange.GetUpperBound() == sf))
    {
        return true;
    }

    // been there, done that.
    EH_LOG((LL_INFO100, " IGNOREFRAME: This frame has been processed already\n"));
    return false;
}
// During the first pass, if the thread is being rudely aborted, install the
// pre-allocated rude ThreadAbortException as the throwable and run the abort
// pre-work if it hasn't been initiated yet.
void CheckForRudeAbort(Thread* pThread, bool fIsFirstPass)
{
    // Nothing to do outside the first pass or for non-rude aborts.
    if (!fIsFirstPass || !pThread->IsRudeAbort())
    {
        return;
    }

    GCX_COOP();

    OBJECTREF rudeAbortThrowable = CLRException::GetPreallocatedRudeThreadAbortException();
    if (pThread->GetThrowable() != rudeAbortThrowable)
    {
        pThread->SafeSetThrowables(rudeAbortThrowable);
    }

    if (!pThread->IsRudeAbortInitiated())
    {
        pThread->PreWorkForThreadAbort();
    }
}
// The first pass found a handler: clear the transient first-pass state so the
// second (unwind) pass starts clean.
void ExceptionTracker::FirstPassIsComplete()
{
    m_pSkipToParentFunctionMD = NULL;
    m_ExceptionFlags.ResetUnwindingToFindResumeFrame();
}
// Record where execution will resume (catching method and its stack frame)
// now that the unwind pass has reached the handling frame.
void ExceptionTracker::SecondPassIsComplete(MethodDesc* pMD, StackFrame sfResumeStackFrame)
{
    m_sfResumeStackFrame = sfResumeStackFrame;
    m_pMethodDescOfCatcher = pMD;

    EH_LOG((LL_INFO100, " second pass unwind completed\n"));
}
CLRUnwindStatus ExceptionTracker::ProcessOSExceptionNotification(
PEXCEPTION_RECORD pExceptionRecord,
PCONTEXT pContextRecord,
PDISPATCHER_CONTEXT pDispatcherContext,
DWORD dwExceptionFlags,
StackFrame sf,
Thread* pThread,
StackTraceState STState
#ifdef USE_PER_FRAME_PINVOKE_INIT
, PVOID pICFSetAsLimitFrame
#endif // USE_PER_FRAME_PINVOKE_INIT
)
{
CONTRACTL
{
MODE_ANY;
GC_TRIGGERS;
THROWS;
}
CONTRACTL_END;
CLRUnwindStatus status = UnwindPending;
CrawlFrame cfThisFrame;
REGDISPLAY regdisp;
UINT_PTR uMethodStartPC;
UINT_PTR uCallerSP;
DWORD_PTR ControlPc = pDispatcherContext->ControlPc;
ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, &regdisp, pDispatcherContext, ControlPc, &uMethodStartPC, this);
#ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
uCallerSP = EECodeManager::GetCallerSp(cfThisFrame.pRD);
#else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
uCallerSP = sf.SP;
#endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
EH_LOG((LL_INFO100, "ProcessCrawlFrame: PSP: " FMT_ADDR " EstablisherFrame: " FMT_ADDR "\n", DBG_ADDR(uCallerSP), DBG_ADDR(sf.SP)));
bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING);
bool fTargetUnwind = !!(dwExceptionFlags & EXCEPTION_TARGET_UNWIND);
// If a thread abort was raised after a catch block's execution, we would have saved
// the index and EstablisherFrame of the EH clause corresponding to the handler that executed.
// Fetch that locally and reset the state against the thread if we are in the unwind pass.
//
// It should be kept in mind that by the virtue of copying the information below, we will
// have it available for the first frame seen during the unwind pass (which will be the
// frame where ThreadAbort was raised after the catch block) for us to skip any termination
// handlers that may be present prior to the EH clause whose index we saved.
DWORD dwTACatchHandlerClauseIndex = pThread->m_dwIndexClauseForCatch;
StackFrame sfEstablisherOfActualHandlerFrame = pThread->m_sfEstablisherOfActualHandlerFrame;
if (!fIsFirstPass)
{
pThread->m_dwIndexClauseForCatch = 0;
pThread->m_sfEstablisherOfActualHandlerFrame.Clear();
}
bool fProcessThisFrame = false;
bool fCrawlFrameIsDirty = false;
// <GC_FUNCLET_REFERENCE_REPORTING>
//
// Refer to the detailed comment in ExceptionTracker::ProcessManagedCallFrame for more context.
// In summary, if we have reached the target of the unwind, then we need to fix CallerSP (for
// GC reference reporting) if we have been asked to.
//
// This will be done only when we reach the frame that is handling the exception.
//
// </GC_FUNCLET_REFERENCE_REPORTING>
if (fTargetUnwind && (m_fFixupCallerSPForGCReporting == true))
{
m_fFixupCallerSPForGCReporting = false;
this->m_EnclosingClauseInfoForGCReporting.SetEnclosingClauseCallerSP(uCallerSP);
}
#ifdef USE_PER_FRAME_PINVOKE_INIT
// Refer to detailed comment below.
PTR_Frame pICFForUnwindTarget = NULL;
#endif // USE_PER_FRAME_PINVOKE_INIT
CheckForRudeAbort(pThread, fIsFirstPass);
bool fIsFrameLess = cfThisFrame.IsFrameless();
GSCookie* pGSCookie = NULL;
bool fSetLastUnwoundEstablisherFrame = false;
//
// process any frame since the last frame we've seen
//
{
GCX_COOP_THREAD_EXISTS(pThread);
// UpdateScannedStackRange needs to be invoked in COOP mode since
// the stack range can also be accessed during GC stackwalk.
fProcessThisFrame = UpdateScannedStackRange(sf, fIsFirstPass);
MethodDesc *pMD = cfThisFrame.GetFunction();
Frame* pFrame = GetLimitFrame(); // next frame to process
if (pFrame != FRAME_TOP)
{
// The following function call sets the GS cookie pointers and checks the cookie.
cfThisFrame.SetCurGSCookie(Frame::SafeGetGSCookiePtr(pFrame));
}
while (((UINT_PTR)pFrame) < uCallerSP)
{
#ifdef USE_PER_FRAME_PINVOKE_INIT
// InlinedCallFrames (ICF) are allocated, initialized and linked to the Frame chain
// by the code generated by the JIT for a method containing a PInvoke.
//
// On X64, JIT generates code to dynamically link and unlink the ICF around
// each PInvoke call. On ARM, on the other hand, JIT's codegen, in context of ICF,
// is more inline with X86 and thus, it links in the ICF at the start of the method
// and unlinks it towards the method end. Thus, ICF is present on the Frame chain
// at any given point so long as the method containing the PInvoke is on the stack.
//
// Now, if the method containing ICF catches an exception, we will reset the Frame chain
// with the LimitFrame, that is computed below, after the catch handler returns. Since this
// computation is done relative to the CallerSP (on both X64 and ARM), we will end up
// removing the ICF from the Frame chain as that will always be below (stack growing down)
// the CallerSP since it lives in the stack space of the current managed frame.
//
// As a result, if there is another PInvoke call after the catch block, it will expect
// the ICF to be present and without one, execution will go south.
//
// To account for this ICF codegen difference, in the EH system we check if the current
// Frame is an ICF or not. If it is and lies inside the current managed method, we
// keep a reference to it and reset the LimitFrame to this saved reference before we
// return back to invoke the catch handler.
//
// Thus, if there is another PInvoke call post the catch handler, it will find ICF as expected.
//
// This is based upon the following assumptions:
//
// 1) There will be no other explicit Frame inserted above the ICF inside the
// managed method containing ICF. That is, ICF is the top-most explicit frame
// in the managed method (and thus, lies in the current managed frame).
//
// 2) There is only one ICF per managed method containing one (or more) PInvoke(s).
//
// 3) We only do this if the current frame is the one handling the exception. This is to
// address the scenario of keeping any ICF from frames lower in the stack active.
//
// 4) The ExceptionUnwind method of the ICF is a no-op. As noted above, we save a reference
// to the ICF and yet continue to process the frame chain. During unwind, this implies
// that we will end up invoking the ExceptionUnwind methods of all frames that lie
// below the caller SP of the managed frame handling the exception. And since the handling
// managed frame contains an ICF, it will be the topmost frame that will lie
// below the callerSP for which we will invoke ExceptionUnwind.
//
// Thus, ICF::ExceptionUnwind should not do anything significant. If any of these assumptions
// break, then the next best thing will be to make the JIT link/unlink the frame dynamically.
if (fTargetUnwind && (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr()))
{
PTR_InlinedCallFrame pICF = (PTR_InlinedCallFrame)pFrame;
// Does it live inside the current managed method? It will iff:
//
// 1) ICF address is higher than the current frame's SP (which we get from DispatcherContext), AND
// 2) ICF address is below callerSP.
if ((GetSP(pDispatcherContext->ContextRecord) < (TADDR)pICF) &&
((UINT_PTR)pICF < uCallerSP))
{
pICFForUnwindTarget = pFrame;
}
}
#endif // USE_PER_FRAME_PINVOKE_INIT
cfThisFrame.CheckGSCookies();
if (fProcessThisFrame)
{
ExceptionTracker::InitializeCrawlFrameForExplicitFrame(&cfThisFrame, pFrame, pMD);
fCrawlFrameIsDirty = true;
status = ProcessExplicitFrame(
&cfThisFrame,
sf,
fIsFirstPass,
STState);
cfThisFrame.CheckGSCookies();
}
if (!fIsFirstPass)
{
//
// notify Frame of unwind
//
pFrame->ExceptionUnwind();
// If we have not yet set the initial explicit frame processed by this tracker, then
// set it now.
if (m_pInitialExplicitFrame == NULL)
{
m_pInitialExplicitFrame = pFrame;
}
}
pFrame = pFrame->Next();
m_pLimitFrame = pFrame;
if (UnwindPending != status)
{
goto lExit;
}
}
if (fCrawlFrameIsDirty)
{
// If crawlframe is dirty, it implies that it got modified as part of explicit frame processing. Thus, we shall
// reinitialize it here.
ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, &regdisp, pDispatcherContext, ControlPc, &uMethodStartPC, this);
}
if (fIsFrameLess)
{
pGSCookie = (GSCookie*)cfThisFrame.GetCodeManager()->GetGSCookieAddr(cfThisFrame.pRD,
&cfThisFrame.codeInfo,
&cfThisFrame.codeManState);
if (pGSCookie)
{
// The following function call sets the GS cookie pointers and checks the cookie.
cfThisFrame.SetCurGSCookie(pGSCookie);
}
status = HandleFunclets(&fProcessThisFrame, fIsFirstPass,
cfThisFrame.GetFunction(), cfThisFrame.IsFunclet(), sf);
}
if ((!fIsFirstPass) && (!fProcessThisFrame))
{
// If we are unwinding and not processing the current frame, it implies that
// this frame has been unwound for one of the following reasons:
//
// 1) We have already seen it due to nested exception processing, OR
// 2) We are skipping frames to find a funclet's parent and thus, its been already
// unwound.
//
// If the current frame is NOT the target of unwind, update the last unwound
// establisher frame. We don't do this for "target of unwind" since it has the catch handler, for a
// duplicate EH clause reported in the funclet, that needs to be invoked and thus, may have valid
// references to report for GC reporting.
//
// If we are not skipping the managed frame, then LastUnwoundEstablisherFrame will be updated later in this method,
// just before we return back to our caller.
if (!fTargetUnwind)
{
SetLastUnwoundEstablisherFrame(sf);
fSetLastUnwoundEstablisherFrame = true;
}
}
// GCX_COOP_THREAD_EXISTS ends here and we may switch to preemp mode now (if applicable).
}
//
// now process managed call frame if needed
//
if (fIsFrameLess)
{
if (fProcessThisFrame)
{
status = ProcessManagedCallFrame(
&cfThisFrame,
sf,
StackFrame::FromEstablisherFrame(pDispatcherContext->EstablisherFrame),
pExceptionRecord,
STState,
uMethodStartPC,
dwExceptionFlags,
dwTACatchHandlerClauseIndex,
sfEstablisherOfActualHandlerFrame);
if (pGSCookie)
{
cfThisFrame.CheckGSCookies();
}
}
if (fTargetUnwind && (UnwindPending == status))
{
SecondPassIsComplete(cfThisFrame.GetFunction(), sf);
status = SecondPassComplete;
}
}
lExit:
// If we are unwinding and have returned successfully from unwinding the frame, then mark it as the last unwound frame for the current
// exception. We don't do this if the frame is target of unwind (i.e. handling the exception) since catch block invocation may have references to be
// reported (if a GC happens during catch block invocation).
//
// If an exception escapes out of a funclet (this is only possible for fault/finally/catch clauses), then we will not return here.
// Since this implies that the funclet no longer has any valid references to report, we will need to set the LastUnwoundEstablisherFrame
// close to the point we detect the exception has escaped the funclet. This is done in ExceptionTracker::CallHandler and marks the
// frame that invoked (and thus, contained) the funclet as the LastUnwoundEstablisherFrame.
//
// Note: Do no add any GC triggering code between the return from ProcessManagedCallFrame and setting of the LastUnwoundEstablisherFrame
if ((!fIsFirstPass) && (!fTargetUnwind) && (!fSetLastUnwoundEstablisherFrame))
{
GCX_COOP();
SetLastUnwoundEstablisherFrame(sf);
}
if (FirstPassComplete == status)
{
FirstPassIsComplete();
}
if (fTargetUnwind && (status == SecondPassComplete))
{
#ifdef USE_PER_FRAME_PINVOKE_INIT
// If we have got a ICF to set as the LimitFrame, do that now.
// The Frame chain is still intact and would be updated using
// the LimitFrame (done after the catch handler returns).
//
// NOTE: This should be done as the last thing before we return
// back to invoke the catch handler.
if (pICFForUnwindTarget != NULL)
{
m_pLimitFrame = pICFForUnwindTarget;
pICFSetAsLimitFrame = (PVOID)pICFForUnwindTarget;
}
#endif // USE_PER_FRAME_PINVOKE_INIT
// Since second pass is complete and we have reached
// the frame containing the catch funclet, reset the enclosing
// clause SP for the catch funclet, if applicable, to be the CallerSP of the
// current frame.
//
// Refer to the detailed comment about this code
// in ExceptionTracker::ProcessManagedCallFrame.
if (m_fResetEnclosingClauseSPForCatchFunclet)
{
#ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
// DispatcherContext->EstablisherFrame's value
// represents the CallerSP of the current frame.
UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)pDispatcherContext->EstablisherFrame;
#else // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
// Extract the CallerSP from RegDisplay
REGDISPLAY *pRD = cfThisFrame.GetRegisterSet();
_ASSERTE(pRD->IsCallerContextValid || pRD->IsCallerSPValid);
UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)GetSP(pRD->pCallerContext);
#endif // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
m_EnclosingClauseInfo = EnclosingClauseInfo(false, cfThisFrame.GetRelOffset(), EnclosingClauseCallerSP);
}
m_fResetEnclosingClauseSPForCatchFunclet = FALSE;
}
// If we are unwinding and the exception was not caught in managed code and we have reached the
// topmost frame we saw in the first pass, then reset thread abort state if this is the last managed
// code personality routine on the stack.
if ((fIsFirstPass == false) && (this->GetTopmostStackFrameFromFirstPass() == sf) && (GetCatchToCallPC() == NULL))
{
ExceptionTracker::ResetThreadAbortStatus(pThread, &cfThisFrame, sf);
}
//
// fill in the out parameter
//
return status;
}
// static
//
// Debug-only helper: dumps the scanned stack range and dispatch pass of every
// exception tracker chained off the current thread, tagged with pszTag so log
// call sites can be told apart. Compiles to nothing outside _DEBUG builds.
void ExceptionTracker::DebugLogTrackerRanges(__in_z const char *pszTag)
{
#ifdef _DEBUG
    CONTRACTL
    {
        MODE_ANY;
        GC_NOTRIGGER;
        NOTHROW;
    }
    CONTRACTL_END;

    Thread* pCurThread = GetThread();
    ExceptionTracker* pCurTracker =
        (pCurThread != NULL) ? pCurThread->GetExceptionState()->m_pCurrentTracker : NULL;

    // Walk from the most recent (innermost) tracker out through the chain of
    // previous nested trackers, logging one line per tracker.
    for (int idx = 0; pCurTracker != NULL; pCurTracker = pCurTracker->m_pPrevNestedInfo, idx++)
    {
        EH_LOG((LL_INFO100, "%s:|%02d| %p: (%p %p) %s\n", pszTag, idx, pCurTracker, pCurTracker->m_ScannedStackRange.GetLowerBound().SP, pCurTracker->m_ScannedStackRange.GetUpperBound().SP,
            pCurTracker->IsInFirstPass() ? "1st pass" : "2nd pass"
            ));
    }
#endif // _DEBUG
}
// HandleNestedExceptionEscape
//
// Invoked when the frame currently being processed (sf) supersedes the scanned
// stack range of one or more previous ("nested") exception trackers, i.e. a
// nested exception has escaped the scope in which it was being dispatched.
// For each superseded tracker, this merges that tracker's scanned stack range
// into the current tracker's range and, during the second (unwind) pass only,
// records the collapsed tracker's EH-clause/enclosing-clause info and frees it.
//
// Parameters:
//   sf           - stack frame currently being processed by this tracker
//   fIsFirstPass - true during the first (search) pass, false during unwind
//
// Return value:
//   NOTE(review): fResult is initialized to false and never assigned true
//   anywhere in this method, so it always returns false - confirm whether any
//   caller actually distinguishes the return value.
bool ExceptionTracker::HandleNestedExceptionEscape(StackFrame sf, bool fIsFirstPass)
{
CONTRACTL
{
// Since this function can modify the scanned stack range, which is also accessed during the GC stackwalk,
// we invoke it in COOP mode so that that access to the range is synchronized.
MODE_COOPERATIVE;
GC_NOTRIGGER;
NOTHROW;
}
CONTRACTL_END;
bool fResult = false;
DebugLogTrackerRanges("  A");
ExceptionTracker* pPreviousTracker   = m_pPrevNestedInfo;
// Loop over every previous tracker whose entire scanned range lies below sf;
// each iteration either bails out early (see below), or combines ranges and
// advances to the next older tracker.
while (pPreviousTracker && pPreviousTracker->m_ScannedStackRange.IsSupersededBy(sf))
{
//
// If the previous tracker (representing exception E1 and whose scanned stack range is superseded by the current frame)
// is in the first pass AND current tracker (representing exceptio E2) has not seen the current frame AND we are here,
// it implies that we had a nested exception while the previous tracker was in the first pass.
//
// This can happen in the following scenarios:
//
// 1) An exception escapes a managed filter (which are invoked in the first pass). However,
//    that is not possible since any exception escaping them is swallowed by the runtime.
//    If someone does longjmp from within the filter, then that is illegal and unsupported.
//
// 2) While processing an exception (E1), either us or native code caught it, triggering unwind. However, before the
//    first managed frame was processed for unwind, another native frame (below the first managed frame on the stack)
//    did a longjmp to go past us or raised another exception from one of their termination handlers.
//
//    Thus, we will never get a chance to switch our tracker for E1 to 2nd pass (which would be done when
//    ExceptionTracker::GetOrCreateTracker will be invoked for the first managed frame) since the longjmp, or the
//    new-exception would result in a new tracker being setup.
//
//    Below is an example of such a case that does longjmp
//    ----------------------------------------------------
//
//    NativeA (does setjmp) -> ManagedFunc -> NativeB
//
//
//    NativeB could be implemented as:
//
//    __try { // raise exception } __finally { longjmp(jmp1, 1); }
//
//    "jmp1" is the jmp_buf setup by NativeA by calling setjmp.
//
//    ManagedFunc could be implemented as:
//
//    try {
//     try { NativeB(); }
//     finally { Console.WriteLine("Finally in ManagedFunc"); }
//     }
//    catch(Exception ex} { Console.WriteLine("Caught"); }
//
//
// In case of nested exception, we combine the stack range (see below) since we have already seen those frames
// in the specified pass for the previous tracker. However, in the example above, the current tracker (in 2nd pass)
// has not see the frames which the previous tracker (which is in the first pass) has seen.
//
// On a similar note, the __finally in the example above could also do a "throw 1;". In such a case, we would expect
// that the catch in ManagedFunc would catch the exception (since "throw 1;" would be represented as SEHException in
// the runtime). However, during first pass, when the exception enters ManagedFunc, the current tracker would not have
// processed the ManagedFunc frame, while the previous tracker (for E1) would have. If we proceed to combine the stack
// ranges, we will omit examining the catch clause in ManagedFunc.
//
// Thus, we cannot combine the stack range yet and must let each frame, already scanned by the previous
// tracker, be also processed by the current (longjmp) tracker if not already done.
//
// Note: This is not a concern if the previous tracker (for exception E1) is in the second pass since any escaping exception (E2)
// would come out of a finally/fault funclet and the runtime's funclet skipping logic will deal with it correctly.
if (pPreviousTracker->IsInFirstPass() && (!this->m_ScannedStackRange.Contains(sf)))
{
// Allow all stackframes seen by previous tracker to be seen by the current
// tracker as well.
if (sf <= pPreviousTracker->m_ScannedStackRange.GetUpperBound())
{
EH_LOG((LL_INFO100, "     - not updating current tracker bounds for escaped exception since\n"));
EH_LOG((LL_INFO100, "     - active tracker (%p; %s) has not seen the current frame [", this, this->IsInFirstPass()?"FirstPass":"SecondPass"));
EH_LOG((LL_INFO100, "     - SP = %p", sf.SP));
EH_LOG((LL_INFO100, "]\n"));
EH_LOG((LL_INFO100, "     - which the previous (%p) tracker has processed.\n", pPreviousTracker));
// Bail out without combining ranges (see the large comment above).
return fResult;
}
}
EH_LOG((LL_INFO100, "    nested exception ESCAPED\n"));
EH_LOG((LL_INFO100, "    - updating current tracker stack bounds\n"));
m_ScannedStackRange.CombineWith(sf, &pPreviousTracker->m_ScannedStackRange);
//
// Only the topmost tracker can be in the first pass.
//
// (Except in the case where we have an exception thrown in a filter,
// which should never escape the filter, and thus, will never supersede
// the previous exception.  This is why we cannot walk the entire list
// of trackers to assert that they're all in the right mode.)
//
// CONSISTENCY_CHECK(!pPreviousTracker->IsInFirstPass());
// If our modes don't match, don't actually delete the supersceded exception.
// If we did, we would lose valueable state on which frames have been scanned
// on the second pass if an exception is thrown during the 2nd pass.
// Advance the current tracker pointer now, since it may be deleted below.
pPreviousTracker = pPreviousTracker->m_pPrevNestedInfo;
if (!fIsFirstPass)
{
// During unwind, at each frame we collapse exception trackers only once i.e. there cannot be multiple
// exception trackers that are collapsed at each frame. Store the information of collapsed exception
// tracker in current tracker to be able to find the parent frame when nested exception escapes.
m_csfEHClauseOfCollapsedTracker = m_pPrevNestedInfo->m_EHClauseInfo.GetCallerStackFrameForEHClause();
m_EnclosingClauseInfoOfCollapsedTracker = m_pPrevNestedInfo->m_EnclosingClauseInfoForGCReporting;
EH_LOG((LL_INFO100, "    - removing previous tracker\n"));
ExceptionTracker* pTrackerToFree = m_pPrevNestedInfo;
// Unlink the superseded tracker from the chain before freeing it.
m_pPrevNestedInfo = pTrackerToFree->m_pPrevNestedInfo;
#if defined(DEBUGGING_SUPPORTED)
if (g_pDebugInterface != NULL)
{
g_pDebugInterface->DeleteInterceptContext(pTrackerToFree->m_DebuggerExState.GetDebuggerInterceptContext());
}
#endif // DEBUGGING_SUPPORTED
CONSISTENCY_CHECK(pTrackerToFree->IsValid());
FreeTrackerMemory(pTrackerToFree, memBoth);
}
DebugLogTrackerRanges("  B");
}
return fResult;
}
// ProcessExplicitFrame
//
// Processes an explicit (non-frameless) Frame encountered during exception
// dispatch. On the first pass, if the frame maps to a managed method
// (GetFunction() != NULL) and we are not merely unwinding to find the resume
// frame, this appends to the managed exception stack trace, notifies the
// profiler of a new/rethrown exception, and delivers the debugger stub
// notification plus the first-chance notification (if not already delivered).
//
// Parameters:
//   pcfThisFrame - crawl frame positioned on the explicit frame
//                  (precondition: not frameless, not FRAME_TOP)
//   sf           - stack frame (SP) associated with the explicit frame
//   fIsFirstPass - TRUE during the first (search) pass
//   STState      - in/out stack trace state; advanced to STS_Append once an
//                  element has been appended for a managed method
//
// Returns: always UnwindPending - dispatch continues with the next frame.
CLRUnwindStatus ExceptionTracker::ProcessExplicitFrame(
CrawlFrame* pcfThisFrame,
StackFrame sf,
BOOL fIsFirstPass,
StackTraceState& STState
)
{
CONTRACTL
{
MODE_COOPERATIVE;
GC_TRIGGERS;
THROWS;
PRECONDITION(!pcfThisFrame->IsFrameless());
PRECONDITION(pcfThisFrame->GetFrame() != FRAME_TOP);
}
CONTRACTL_END;
Frame* pFrame = pcfThisFrame->GetFrame();
EH_LOG((LL_INFO100, "  [ ProcessExplicitFrame: pFrame: " FMT_ADDR " pMD: " FMT_ADDR " %s PASS ]\n", DBG_ADDR(pFrame), DBG_ADDR(pFrame->GetFunction()), fIsFirstPass ? "FIRST" : "SECOND"));
// NOTE(review): the PRECONDITION above asserts pFrame != FRAME_TOP (and the
// EH_LOG above has already dereferenced pFrame), so this check appears to be
// unreachable defensive code - confirm before removing.
if (FRAME_TOP == pFrame)
{
goto lExit;
}
// Skip stack-trace and notification work while we are only unwinding to
// locate the resume frame (see ProcessManagedCallFrame for the rationale).
if (!m_ExceptionFlags.UnwindingToFindResumeFrame())
{
//
// update our exception stacktrace
//
BOOL bReplaceStack      = FALSE;
BOOL bSkipLastElement   = FALSE;
if (STS_FirstRethrowFrame == STState)
{
bSkipLastElement = TRUE;
}
else
if (STS_NewException == STState)
{
bReplaceStack    = TRUE;
}
// Normally, we need to notify the profiler in two cases:
// 1) a brand new exception is thrown, and
// 2) an exception is rethrown.
// However, in this case, if the explicit frame doesn't correspond to a MD, we don't set STState to STS_Append,
// so the next managed call frame we process will give another ExceptionThrown() callback to the profiler.
// So we give the callback below, only in the case when we append to the stack trace.
MethodDesc* pMD = pcfThisFrame->GetFunction();
if (pMD)
{
Thread* pThread = m_pThread;
if (fIsFirstPass)
{
//
// notify profiler of new/rethrown exception
//
if (bSkipLastElement || bReplaceStack)
{
GCX_COOP();
EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack);
}
//
// Update stack trace
//
m_StackTraceInfo.AppendElement(CanAllocateMemory(), NULL, sf.SP, pMD, pcfThisFrame);
m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement);
//
// make callback to debugger and/or profiler
//
#if defined(DEBUGGING_SUPPORTED)
if (ExceptionTracker::NotifyDebuggerOfStub(pThread, sf, pFrame))
{
// Deliver the FirstChanceNotification after the debugger, if not already delivered.
if (!this->DeliveredFirstChanceNotification())
{
ExceptionNotifications::DeliverFirstChanceNotification();
}
}
#endif // DEBUGGING_SUPPORTED
STState = STS_Append;
}
}
}
lExit:
return UnwindPending;
}
// HandleFunclets
//
// Drives the "skip to parent" logic for funclet frames. A funclet is logically
// part of its parent method (the parent's clauses cover the funclet's logical
// location), so once a funclet has been seen, every call frame up to and
// including the parent method's frame must be skipped.
//
// The skip state lives in m_pSkipToParentFunctionMD, a tagged value:
//   - low bit set:   holds the funclet's MethodDesc; we are skipping frames
//                    until the parent method's frame is found.
//   - low bit clear: holds the parent frame's SP (recorded once the parent is
//                    found), used to detect a double call on the parent method.
//
// Parameters:
//   pfProcessThisFrame - out; set to false when the current frame must be skipped
//   fIsFirstPass       - true during the first (search) pass
//   pMD                - MethodDesc of the current managed frame
//   fFunclet           - true if the current frame is a funclet
//   sf                 - stack frame of the current managed frame
//
// Returns: always UnwindPending.
CLRUnwindStatus ExceptionTracker::HandleFunclets(bool* pfProcessThisFrame, bool fIsFirstPass,
                      MethodDesc * pMD, bool fFunclet, StackFrame sf)
{
    CONTRACTL
    {
        MODE_ANY;
        GC_NOTRIGGER;
        NOTHROW;
    }
    CONTRACTL_END;

    BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();

    if (fIsFirstPass && fUnwindingToFindResumeFrame)
    {
        // First pass while unwinding to find the resume frame: all skipping is
        // handled by the resume-frame logic (see ProcessManagedCallFrame), so
        // just make sure no stale skip state is left behind. This also avoids
        // calling the same filter more than once.
        m_pSkipToParentFunctionMD = NULL;
    }
    else
    {
        // Snapshot the tagged skip state; every branch below reads it before
        // (possibly) overwriting m_pSkipToParentFunctionMD.
        const UINT_PTR uSkipState = (UINT_PTR)m_pSkipToParentFunctionMD;

        if (uSkipState & 1)
        {
            // Still hunting for the funclet's parent: skip this frame.
            EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: skipping to parent\n"));
            *pfProcessThisFrame = false;

            // The parent is the first non-funclet frame whose MethodDesc matches
            // the (untagged) funclet MethodDesc stashed earlier.
            const bool fFoundParent =
                (((UINT_PTR)pMD) == (uSkipState & ~((UINT_PTR)1))) && !fFunclet;
            if (fFoundParent)
            {
                EH_LOG((LL_INFO100, " SKIPTOPARENT: found parent for funclet pMD = %p, sf.SP = %p, will stop skipping frames\n", pMD, sf.SP));
                _ASSERTE(0 == (((UINT_PTR)sf.SP) & 1));
                // Record the parent's SP (low bit clear) so a later double call
                // on the parent method can be recognized.
                m_pSkipToParentFunctionMD = (MethodDesc*)sf.SP;

                _ASSERTE(!fUnwindingToFindResumeFrame);
            }
        }
        else if (fFunclet)
        {
            // A funclet was encountered: tag its MethodDesc (set the low bit)
            // and start skipping frames until its parent shows up.
            EH_LOG((LL_INFO100, " SKIPTOPARENT: found funclet pMD = %p, will start skipping frames\n", pMD));
            _ASSERTE(0 == (((UINT_PTR)pMD) & 1));
            m_pSkipToParentFunctionMD = (MethodDesc*)(((UINT_PTR)pMD) | 1);
        }
        else
        {
            if (sf.SP == uSkipState)
            {
                // Second visit to the recorded parent frame: skip it.
                EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: got double call on parent method\n"));
                *pfProcessThisFrame = false;
            }
            else if ((uSkipState != 0) && (sf.SP > uSkipState))
            {
                // Moved past the recorded parent frame: clear the skip state.
                EH_LOG((LL_INFO100, " SKIPTOPARENT: went past parent method\n"));
                m_pSkipToParentFunctionMD = NULL;
            }
        }
    }

    return UnwindPending;
}
CLRUnwindStatus ExceptionTracker::ProcessManagedCallFrame(
CrawlFrame* pcfThisFrame,
StackFrame sf,
StackFrame sfEstablisherFrame,
EXCEPTION_RECORD* pExceptionRecord,
StackTraceState STState,
UINT_PTR uMethodStartPC,
DWORD dwExceptionFlags,
DWORD dwTACatchHandlerClauseIndex,
StackFrame sfEstablisherOfActualHandlerFrame
)
{
CONTRACTL
{
MODE_ANY;
GC_TRIGGERS;
THROWS;
PRECONDITION(pcfThisFrame->IsFrameless());
}
CONTRACTL_END;
UINT_PTR uControlPC = (UINT_PTR)GetControlPC(pcfThisFrame->GetRegisterSet());
CLRUnwindStatus ReturnStatus = UnwindPending;
MethodDesc* pMD = pcfThisFrame->GetFunction();
bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING);
bool fIsFunclet = pcfThisFrame->IsFunclet();
CONSISTENCY_CHECK(IsValid());
CONSISTENCY_CHECK(ThrowableIsValid() || !fIsFirstPass);
CONSISTENCY_CHECK(pMD != 0);
EH_LOG((LL_INFO100, " [ ProcessManagedCallFrame this=%p, %s PASS ]\n", this, (fIsFirstPass ? "FIRST" : "SECOND")));
EH_LOG((LL_INFO100, " [ method: %s%s, %s ]\n",
(fIsFunclet ? "FUNCLET of " : ""),
pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName));
Thread *pThread = GetThread();
_ASSERTE (pThread);
INDEBUG( DumpClauses(pcfThisFrame->GetJitManager(), pcfThisFrame->GetMethodToken(), uMethodStartPC, uControlPC) );
bool fIsILStub = pMD->IsILStub();
bool fGiveDebuggerAndProfilerNotification = !fIsILStub;
BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();
bool fIgnoreThisFrame = false;
bool fProcessThisFrameToFindResumeFrameOnly = false;
MethodDesc * pUserMDForILStub = NULL;
Frame * pILStubFrame = NULL;
if (fIsILStub && !fIsFunclet) // only make this callback on the main method body of IL stubs
pUserMDForILStub = GetUserMethodForILStub(pThread, sf.SP, pMD, &pILStubFrame);
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
BOOL fCanMethodHandleException = TRUE;
CorruptionSeverity currentSeverity = NotCorrupting;
{
// Switch to COOP mode since we are going to request throwable
GCX_COOP();
// We must defer to the MethodDesc of the user method instead of the IL stub
// itself because the user can specify the policy on a per-method basis and
// that won't be reflected via the IL stub's MethodDesc.
MethodDesc * pMDWithCEAttribute = (pUserMDForILStub != NULL) ? pUserMDForILStub : pMD;
// Check if the exception can be delivered to the method? It will check if the exception
// is a CE or not. If it is, it will check if the method can process it or not.
currentSeverity = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetCorruptionSeverity();
fCanMethodHandleException = CEHelper::CanMethodHandleException(currentSeverity, pMDWithCEAttribute);
}
#endif // FEATURE_CORRUPTING_EXCEPTIONS
// Doing rude abort. Skip all non-constrained execution region code.
// When rude abort is initiated, we cannot intercept any exceptions.
if ((pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame)))
{
// If we are unwinding to find the real resume frame, then we cannot ignore frames yet.
// We need to make sure we find the correct resume frame before starting to ignore frames.
if (fUnwindingToFindResumeFrame)
{
fProcessThisFrameToFindResumeFrameOnly = true;
}
else
{
EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort/CE\n"));
fIgnoreThisFrame = true;
}
}
//
// BEGIN resume frame processing code
//
// Often times, we'll run into the situation where the actual resume call frame
// is not the same call frame that we see the catch clause in. The reason for this
// is that catch clauses get duplicated down to cover funclet code ranges. When we
// see a catch clause covering our control PC, but it is marked as a duplicate, we
// need to continue to unwind until we find the same clause that isn't marked as a
// duplicate. This will be the correct resume frame.
//
// We actually achieve this skipping by observing that if we are catching at a
// duplicated clause, all the call frames we should be skipping have already been
// processed by a previous exception dispatch. So if we allow the unwind to
// continue, we will immediately bump into the ExceptionTracker for the previous
// dispatch, and our resume frame will be the last frame seen by that Tracker.
//
// Note that we will have visited all the EH clauses for a particular method when we
// see its first funclet (the funclet which is closest to the leaf). We need to make
// sure we don't process any EH clause again when we see other funclets or the parent
// method until we get to the real resume frame. The real resume frame may be another
// funclet, which is why we can't blindly skip all funclets until we see the parent
// method frame.
//
// If the exception is handled by the method, then UnwindingToFindResumeFrame takes
// care of the skipping. We basically skip everything when we are unwinding to find
// the resume frame. If the exception is not handled by the method, then we skip all the
// funclets until we get to the parent method. The logic to handle this is in
// HandleFunclets(). In the first pass, HandleFunclets() only kicks
// in if we are not unwinding to find the resume frame.
//
// Then on the second pass, we need to process frames up to the initial place where
// we saw the catch clause, which means upto and including part of the resume stack
// frame. Then we need to skip the call frames up to the real resume stack frame
// and resume.
//
// In the second pass, we have the same problem with skipping funclets as in the first
// pass. However, in this case, we know exactly which frame is our target unwind frame
// (EXCEPTION_TARGET_UNWIND will be set). So we blindly unwind until we see the parent
// method, or until the target unwind frame.
PTR_EXCEPTION_CLAUSE_TOKEN pLimitClauseToken = NULL;
if (!fIgnoreThisFrame && !fIsFirstPass && !m_sfResumeStackFrame.IsNull() && (sf >= m_sfResumeStackFrame))
{
EH_LOG((LL_INFO100, " RESUMEFRAME: sf is %p and m_sfResumeStackFrame: %p\n", sf.SP, m_sfResumeStackFrame.SP));
EH_LOG((LL_INFO100, " RESUMEFRAME: %s initial resume frame: %p\n", (sf == m_sfResumeStackFrame) ? "REACHED" : "PASSED" , m_sfResumeStackFrame.SP));
// process this frame to call handlers
EH_LOG((LL_INFO100, " RESUMEFRAME: Found last frame to process finallys in, need to process only part of call frame\n"));
EH_LOG((LL_INFO100, " RESUMEFRAME: Limit clause token: %p\n", m_pClauseForCatchToken));
pLimitClauseToken = m_pClauseForCatchToken;
// The limit clause is the same as the clause we're catching at. It is used
// as the last clause we process in the "inital resume frame". Anything further
// down the list of clauses is skipped along with all call frames up to the actual
// resume frame.
CONSISTENCY_CHECK_MSG(sf == m_sfResumeStackFrame, "Passed initial resume frame and fIgnoreThisFrame wasn't set!");
}
//
// END resume frame code
//
if (!fIgnoreThisFrame)
{
BOOL fFoundHandler = FALSE;
DWORD_PTR dwHandlerStartPC = NULL;
BOOL bReplaceStack = FALSE;
BOOL bSkipLastElement = FALSE;
bool fUnwindFinished = false;
if (STS_FirstRethrowFrame == STState)
{
bSkipLastElement = TRUE;
}
else
if (STS_NewException == STState)
{
bReplaceStack = TRUE;
}
// We need to notify the profiler on the first pass in two cases:
// 1) a brand new exception is thrown, and
// 2) an exception is rethrown.
if (fIsFirstPass && (bSkipLastElement || bReplaceStack))
{
GCX_COOP();
EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack);
}
if (!fUnwindingToFindResumeFrame)
{
//
// update our exception stacktrace, ignoring IL stubs
//
if (fIsFirstPass && !pMD->IsILStub())
{
GCX_COOP();
m_StackTraceInfo.AppendElement(CanAllocateMemory(), uControlPC, sf.SP, pMD, pcfThisFrame);
m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement);
}
//
// make callback to debugger and/or profiler
//
if (fGiveDebuggerAndProfilerNotification)
{
if (fIsFirstPass)
{
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pMD);
// Notfiy the debugger that we are on the first pass for a managed exception.
// Note that this callback is made for every managed frame.
EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, uControlPC, sf.SP);
#if defined(DEBUGGING_SUPPORTED)
_ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker);
// check if the current exception has been intercepted.
if (m_ExceptionFlags.DebuggerInterceptInfo())
{
// According to the x86 implementation, we don't need to call the ExceptionSearchFunctionLeave()
// profiler callback.
StackFrame sfInterceptStackFrame;
m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL,
reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)),
NULL, NULL);
// Save the target unwind frame just like we do when we find a catch clause.
m_sfResumeStackFrame = sfInterceptStackFrame;
ReturnStatus = FirstPassComplete;
goto lExit;
}
#endif // DEBUGGING_SUPPORTED
// Attempt to deliver the first chance notification to the AD only *AFTER* the debugger
// has done that, provided we have not already delivered it.
if (!this->DeliveredFirstChanceNotification())
{
ExceptionNotifications::DeliverFirstChanceNotification();
}
}
else
{
#if defined(DEBUGGING_SUPPORTED)
_ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker);
// check if the exception is intercepted.
if (m_ExceptionFlags.DebuggerInterceptInfo())
{
MethodDesc* pInterceptMD = NULL;
StackFrame sfInterceptStackFrame;
// check if we have reached the interception point yet
m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptMD, NULL,
reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)),
NULL, NULL);
// If the exception has gone unhandled in the first pass, we wouldn't have a chance
// to set the target unwind frame. Check for this case now.
if (m_sfResumeStackFrame.IsNull())
{
m_sfResumeStackFrame = sfInterceptStackFrame;
}
_ASSERTE(m_sfResumeStackFrame == sfInterceptStackFrame);
if ((pInterceptMD == pMD) &&
(sfInterceptStackFrame == sf))
{
// If we have reached the stack frame at which the exception is intercepted,
// then finish the second pass prematurely.
SecondPassIsComplete(pMD, sf);
ReturnStatus = SecondPassComplete;
goto lExit;
}
}
#endif // DEBUGGING_SUPPORTED
// According to the x86 implementation, we don't need to call the ExceptionUnwindFunctionEnter()
// profiler callback when an exception is intercepted.
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pMD);
}
}
}
#ifdef FEATURE_STACK_PROBE
// Don't call a handler if we're within a certain distance of the end of the stack. Could end up here via probe, in
// which case guard page is intact, or via hard SO, in which case guard page won't be. So don't check for presence of
// guard page, just check for sufficient space on stack.
if ( IsStackOverflowException()
&& !pThread->CanResetStackTo((void*)sf.SP))
{
EH_LOG((LL_INFO100, " STACKOVERFLOW: IGNOREFRAME: stack frame too close to guard page: sf.SP: %p\n", sf.SP));
}
else
#endif // FEATURE_STACK_PROBE
{
IJitManager* pJitMan = pcfThisFrame->GetJitManager();
const METHODTOKEN& MethToken = pcfThisFrame->GetMethodToken();
EH_CLAUSE_ENUMERATOR EnumState;
unsigned EHCount;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
// The method cannot handle the exception (e.g. cannot handle the CE), then simply bail out
// without examining the EH clauses in it.
if (!fCanMethodHandleException)
{
LOG((LF_EH, LL_INFO100, "ProcessManagedCallFrame - CEHelper decided not to look for exception handlers in the method(MD:%p).\n", pMD));
// Set the flag to skip this frame since the CE cannot be delivered
_ASSERTE(currentSeverity == ProcessCorrupting);
// Force EHClause count to be zero
EHCount = 0;
}
else
#endif // FEATURE_CORRUPTING_EXCEPTIONS
{
EHCount = pJitMan->InitializeEHEnumeration(MethToken, &EnumState);
}
if (!fIsFirstPass)
{
// For a method that may have nested funclets, it is possible that a reference may be
// dead at the point where control flow left the method but may become active once
// a funclet is executed.
//
// Upon returning from the funclet but before the next funclet is invoked, a GC
// may happen if we are in preemptive mode. Since the GC stackwalk will commence
// at the original IP at which control left the method, it can result in the reference
// not being updated (since it was dead at the point control left the method) if the object
// is moved during GC.
//
// To address this, we will indefinitely switch to COOP mode while enumerating, and invoking,
// funclets.
//
// This switch is also required for another scenario: we may be in unwind phase and the current frame
// may not have any termination handlers to be invoked (i.e. it may have zero EH clauses applicable to
// the unwind phase). If we do not switch to COOP mode for such a frame, we could remain in preemp mode.
// Upon returning back from ProcessOSExceptionNotification in ProcessCLRException, when we attempt to
// switch to COOP mode to update the LastUnwoundEstablisherFrame, we could get blocked due to an
// active GC, prior to peforming the update.
//
// In this case, if the GC stackwalk encounters the current frame and attempts to check if it has been
// unwound by an exception, then while it has been unwound (especially since it had no termination handlers)
// logically, it will not figure out as unwound and thus, GC stackwalk would attempt to report references from
// it, which is incorrect.
//
// Thus, when unwinding, we will always switch to COOP mode indefinitely, irrespective of whether
// the frame has EH clauses to be processed or not.
GCX_COOP_NO_DTOR();
// We will also forbid any GC to happen between successive funclet invocations.
// This will be automatically undone when the contract goes off the stack as the method
// returns back to its caller.
BEGINFORBIDGC();
}
for (unsigned i = 0; i < EHCount; i++)
{
EE_ILEXCEPTION_CLAUSE EHClause;
PTR_EXCEPTION_CLAUSE_TOKEN pEHClauseToken = pJitMan->GetNextEHClause(&EnumState, &EHClause);
EH_LOG((LL_INFO100, " considering %s clause [%x,%x), ControlPc is %s clause (offset %x)",
(IsFault(&EHClause) ? "fault" :
(IsFinally(&EHClause) ? "finally" :
(IsFilterHandler(&EHClause) ? "filter" :
(IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
EHClause.TryStartPC,
EHClause.TryEndPC,
(ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset()) ? "inside" : "outside"),
pcfThisFrame->GetRelOffset()
));
LOG((LF_EH, LL_INFO100, "\n"));
// If we have a valid EstablisherFrame for the managed frame where
// ThreadAbort was raised after the catch block, then see if we
// have reached that frame during the exception dispatch. If we
// have, then proceed to skip applicable EH clauses.
if ((!sfEstablisherOfActualHandlerFrame.IsNull()) && (sfEstablisherFrame == sfEstablisherOfActualHandlerFrame))
{
// We should have a valid index of the EH clause (corresponding to a catch block) after
// which thread abort was raised?
_ASSERTE(dwTACatchHandlerClauseIndex > 0);
{
// Since we have the index, check if the current EH clause index
// is less then saved index. If it is, then it implies that
// we are evaluating clauses that lie "before" the EH clause
// for the catch block "after" which thread abort was raised.
//
// Since ThreadAbort has to make forward progress, we will
// skip evaluating any such EH clauses. Two things can happen:
//
// 1) We will find clauses representing handlers beyond the
// catch block after which ThreadAbort was raised. Since this is
// what we want, we evaluate them.
//
// 2) There wont be any more clauses implying that the catch block
// after which the exception was raised was the outermost
// handler in the method. Thus, the exception will escape out,
// which is semantically the correct thing to happen.
//
// The premise of this check is based upon a JIT compiler's implementation
// detail: when it generates EH clauses, JIT compiler will order them from
// top->bottom (when reading a method) and inside->out when reading nested
// clauses.
//
// This assumption is not new since the basic EH type-matching is reliant
// on this very assumption. However, now we have one more candidate that
// gets to rely on it.
//
// Eventually, this enables forward progress of thread abort exception.
if (i <= (dwTACatchHandlerClauseIndex -1))
{
EH_LOG((LL_INFO100, " skipping the evaluation of EH clause (index=%d) since we cannot process an exception in a handler\n", i));
EH_LOG((LL_INFO100, " that exists prior to the one (index=%d) after which ThreadAbort was [re]raised.\n", dwTACatchHandlerClauseIndex));
continue;
}
}
}
// see comment above where we set pLimitClauseToken
if (pEHClauseToken == pLimitClauseToken)
{
EH_LOG((LL_INFO100, " found limit clause, stopping clause enumeration\n"));
// <GC_FUNCLET_REFERENCE_REPORTING>
//
// If we are here, the exception has been identified to be handled by a duplicate catch clause
// that is protecting the current funclet. The call to SetEnclosingClauseInfo (below)
// will setup the CallerSP (for GC reference reporting) to be the SP of the
// of the caller of current funclet (where the exception has happened, or is escaping from).
//
// However, we need the CallerSP to be set as the SP of the caller of the
// actual frame that will contain (and invoke) the catch handler corresponding to
// the duplicate clause. But that isn't available right now and we can only know
// once we unwind upstack to reach the target frame.
//
// Thus, upon reaching the target frame and before invoking the catch handler,
// we will fix up the CallerSP (for GC reporting) to be that of the caller of the
// target frame that will be invoking the actual catch handler.
//
// </GC_FUNCLET_REFERENCE_REPORTING>
//
// for catch clauses
SetEnclosingClauseInfo(fIsFunclet,
pcfThisFrame->GetRelOffset(),
GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
fUnwindFinished = true;
m_fFixupCallerSPForGCReporting = true;
break;
}
BOOL fTermHandler = IsFaultOrFinally(&EHClause);
fFoundHandler = FALSE;
if (( fIsFirstPass && fTermHandler) ||
(!fIsFirstPass && !fTermHandler))
{
continue;
}
if (ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset()))
{
EH_LOG((LL_INFO100, " clause covers ControlPC\n"));
dwHandlerStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.HandlerStartPC);
if (fUnwindingToFindResumeFrame)
{
CONSISTENCY_CHECK(fIsFirstPass);
if (!fTermHandler)
{
// m_pClauseForCatchToken can only be NULL for continuable exceptions, but we should never
// get here if we are handling continuable exceptions. fUnwindingToFindResumeFrame is
// only true at the end of the first pass.
_ASSERTE(m_pClauseForCatchToken != NULL);
// handlers match and not duplicate?
EH_LOG((LL_INFO100, " RESUMEFRAME: catch handler: [%x,%x], this handler: [%x,%x] %s\n",
m_ClauseForCatch.HandlerStartPC,
m_ClauseForCatch.HandlerEndPC,
EHClause.HandlerStartPC,
EHClause.HandlerEndPC,
IsDuplicateClause(&EHClause) ? "[duplicate]" : ""));
if ((m_ClauseForCatch.HandlerStartPC == EHClause.HandlerStartPC) &&
(m_ClauseForCatch.HandlerEndPC == EHClause.HandlerEndPC))
{
EH_LOG((LL_INFO100, " RESUMEFRAME: found clause with same handler as catch\n"));
if (!IsDuplicateClause(&EHClause))
{
CONSISTENCY_CHECK(fIsFirstPass);
if (fProcessThisFrameToFindResumeFrameOnly)
{
EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame, \
but rude thread abort is initiated: %p\n", sf.SP));
// We have found the real resume frame. However, rude thread abort
// has been initiated. Thus, we need to continue the first pass
// as if we have not found a handler yet. To do so, we need to
// reset all the information we have saved when we find the handler.
m_ExceptionFlags.ResetUnwindingToFindResumeFrame();
m_uCatchToCallPC = NULL;
m_pClauseForCatchToken = NULL;
m_sfResumeStackFrame.Clear();
ReturnStatus = UnwindPending;
}
else
{
EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame: %p\n", sf.SP));
// Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler
// that decided to handle the exception. We may need it
// if a ThreadAbort is raised after the catch block
// executes.
m_dwIndexClauseForCatch = i + 1;
m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame;
#ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD);
#else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
// On ARM & ARM64, the EstablisherFrame is the value of SP at the time a function was called and before it's prolog
// executed. Effectively, it is the SP of the caller.
m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP;
#endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
ReturnStatus = FirstPassComplete;
}
}
break;
}
}
}
else if (IsFilterHandler(&EHClause))
{
DWORD_PTR dwResult = EXCEPTION_CONTINUE_SEARCH;
DWORD_PTR dwFilterStartPC;
dwFilterStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.FilterOffset);
EH_LOG((LL_INFO100, " calling filter\n"));
// @todo : If user code throws a StackOverflowException and we have plenty of stack,
// we probably don't want to be so strict in not calling handlers.
if (! IsStackOverflowException())
{
// Save the current EHClause Index and Establisher of the clause post which
// ThreadAbort was raised. This is done because an exception handled inside a filter
// can reset the state that was setup before the filter was invoked.
//
// We dont have to do this for finally/fault clauses since they execute
// in the second pass and by that time, we have already skipped the required
// EH clauses in the applicable stackframe.
DWORD dwPreFilterTACatchHandlerClauseIndex = dwTACatchHandlerClauseIndex;
StackFrame sfPreFilterEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame;
EX_TRY
{
// We want to call filters even if the thread is aborting, so suppress abort
// checks while the filter runs.
ThreadPreventAsyncHolder preventAbort(TRUE);
// for filter clauses
SetEnclosingClauseInfo(fIsFunclet,
pcfThisFrame->GetRelOffset(),
GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
#ifdef USE_FUNCLET_CALL_HELPER
// On ARM & ARM64, the OS passes us the CallerSP for the frame for which personality routine has been invoked.
// Since IL filters are invoked in the first pass, we pass this CallerSP to the filter funclet which will
// then lookup the actual frame pointer value using it since we dont have a frame pointer to pass to it
// directly.
//
// Assert our invariants (we had set them up in InitializeCrawlFrame):
REGDISPLAY *pCurRegDisplay = pcfThisFrame->GetRegisterSet();
CONTEXT *pContext = NULL;
#ifndef USE_CURRENT_CONTEXT_IN_FILTER
// 1) In first pass, we dont have a valid current context IP
_ASSERTE(GetIP(pCurRegDisplay->pCurrentContext) == 0);
pContext = pCurRegDisplay->pCallerContext;
#else
pContext = pCurRegDisplay->pCurrentContext;
#endif // !USE_CURRENT_CONTEXT_IN_FILTER
#ifdef USE_CALLER_SP_IN_FUNCLET
// 2) Our caller context and caller SP are valid
_ASSERTE(pCurRegDisplay->IsCallerContextValid && pCurRegDisplay->IsCallerSPValid);
// 3) CallerSP is intact
_ASSERTE(GetSP(pCurRegDisplay->pCallerContext) == GetRegdisplaySP(pCurRegDisplay));
#endif // USE_CALLER_SP_IN_FUNCLET
#endif // USE_FUNCLET_CALL_HELPER
{
// CallHandler expects to be in COOP mode.
GCX_COOP();
dwResult = CallHandler(dwFilterStartPC, sf, &EHClause, pMD, Filter X86_ARG(pContext) ARM_ARG(pContext) ARM64_ARG(pContext));
}
}
EX_CATCH
{
// We had an exception in filter invocation that remained unhandled.
// Sync managed exception state, for the managed thread, based upon the active exception tracker.
pThread->SyncManagedExceptionState(false);
// we've returned from the filter abruptly, now out of managed code
m_EHClauseInfo.SetManagedCodeEntered(FALSE);
EH_LOG((LL_INFO100, " filter threw an exception\n"));
// notify profiler
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();
m_EHClauseInfo.ResetInfo();
// continue search
}
EX_END_CATCH(SwallowAllExceptions);
// Reset the EH clause Index and Establisher of the TA reraise clause
pThread->m_dwIndexClauseForCatch = dwPreFilterTACatchHandlerClauseIndex;
pThread->m_sfEstablisherOfActualHandlerFrame = sfPreFilterEstablisherOfActualHandlerFrame;
if (pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame))
{
EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort\n"));
goto lExit;
}
}
else
{
EH_LOG((LL_INFO100, " STACKOVERFLOW: filter not called due to lack of guard page\n"));
// continue search
}
if (EXCEPTION_EXECUTE_HANDLER == dwResult)
{
fFoundHandler = TRUE;
}
else if (EXCEPTION_CONTINUE_SEARCH != dwResult)
{
//
// Behavior is undefined according to the spec. Let's not execute the handler.
//
}
EH_LOG((LL_INFO100, " filter returned %s\n", (fFoundHandler ? "EXCEPTION_EXECUTE_HANDLER" : "EXCEPTION_CONTINUE_SEARCH")));
}
else if (IsTypedHandler(&EHClause))
{
GCX_COOP();
TypeHandle thrownType = TypeHandle();
OBJECTREF oThrowable = m_pThread->GetThrowable();
if (oThrowable != NULL)
{
oThrowable = PossiblyUnwrapThrowable(oThrowable, pcfThisFrame->GetAssembly());
thrownType = oThrowable->GetTrueTypeHandle();
}
if (!thrownType.IsNull())
{
if (EHClause.ClassToken == mdTypeRefNil)
{
// this is a catch(...)
fFoundHandler = TRUE;
}
else
{
TypeHandle typeHnd = pJitMan->ResolveEHClause(&EHClause, pcfThisFrame);
EH_LOG((LL_INFO100,
" clause type = %s\n",
(!typeHnd.IsNull() ? typeHnd.GetMethodTable()->GetDebugClassName()
: "<couldn't resolve>")));
EH_LOG((LL_INFO100,
" thrown type = %s\n",
thrownType.GetMethodTable()->GetDebugClassName()));
fFoundHandler = !typeHnd.IsNull() && ExceptionIsOfRightType(typeHnd, thrownType);
}
}
}
else
{
_ASSERTE(fTermHandler);
fFoundHandler = TRUE;
}
if (fFoundHandler)
{
if (fIsFirstPass)
{
_ASSERTE(IsFilterHandler(&EHClause) || IsTypedHandler(&EHClause));
EH_LOG((LL_INFO100, " found catch at 0x%p, sp = 0x%p\n", dwHandlerStartPC, sf.SP));
m_uCatchToCallPC = dwHandlerStartPC;
m_pClauseForCatchToken = pEHClauseToken;
m_ClauseForCatch = EHClause;
m_sfResumeStackFrame = sf;
#if defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED)
//
// notify the debugger and profiler
//
if (fGiveDebuggerAndProfilerNotification)
{
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pMD);
}
if (fIsILStub)
{
//
// NotifyOfCHFFilter has two behaviors
// * Notify debugger, get interception info and unwind (function will not return)
// In this case, m_sfResumeStackFrame is expected to be NULL or the frame of interception.
// We NULL it out because we get the interception event after this point.
// * Notify debugger and return.
// In this case the normal EH proceeds and we need to reset m_sfResumeStackFrame to the sf catch handler.
// TODO: remove this call and try to report the IL catch handler in the IL stub itself.
m_sfResumeStackFrame.Clear();
EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter((EXCEPTION_POINTERS*)&m_ptrs, pILStubFrame);
m_sfResumeStackFrame = sf;
}
else
{
// We don't need to do anything special for continuable exceptions after calling
// this callback. We are going to start unwinding anyway.
EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread, pMD, (TADDR) uMethodStartPC, sf.SP,
&EHClause);
}
// If the exception is intercepted, then the target unwind frame may not be the
// stack frame we are currently processing, so clear it now. We'll set it
// later in second pass.
if (pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo())
{
m_sfResumeStackFrame.Clear();
}
#endif //defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED)
//
// BEGIN resume frame code
//
EH_LOG((LL_INFO100, " RESUMEFRAME: initial resume stack frame: %p\n", sf.SP));
if (IsDuplicateClause(&EHClause))
{
EH_LOG((LL_INFO100, " RESUMEFRAME: need to unwind to find real resume frame\n"));
m_ExceptionFlags.SetUnwindingToFindResumeFrame();
// This is a duplicate catch funclet. As a result, we will continue to let the
// exception dispatch proceed upstack to find the actual frame where the
// funclet lives.
//
// At the same time, we also need to save the CallerSP of the frame containing
// the catch funclet (like we do for other funclets). If the current frame
// represents a funclet that was invoked by JITted code, then we will save
// the caller SP of the current frame when we see it during the 2nd pass -
// refer to the use of "pLimitClauseToken" in the code above.
//
// However, that is not the callerSP of the frame containing the catch funclet
// as the actual frame containing the funclet (and where it will be executed)
// is the one that will be the target of unwind during the first pass.
//
// To correctly get that, we will determine if the current frame is a funclet
// and if it was invoked from JITted code. If this is true, then current frame
// represents a finally funclet invoked non-exceptionally (from its parent frame
// or yet another funclet). In such a case, we will set a flag indicating that
// we need to reset the enclosing clause SP for the catch funclet and later,
// when 2nd pass reaches the actual frame containing the catch funclet to be
// executed, we will update the enclosing clause SP if the
// "m_fResetEnclosingClauseSPForCatchFunclet" flag is set, just prior to
// invoking the catch funclet.
if (fIsFunclet)
{
REGDISPLAY* pCurRegDisplay = pcfThisFrame->GetRegisterSet();
_ASSERTE(pCurRegDisplay->IsCallerContextValid);
TADDR adrReturnAddressFromFunclet = PCODEToPINSTR(GetIP(pCurRegDisplay->pCallerContext)) - STACKWALK_CONTROLPC_ADJUST_OFFSET;
m_fResetEnclosingClauseSPForCatchFunclet = ExecutionManager::IsManagedCode(adrReturnAddressFromFunclet);
}
ReturnStatus = UnwindPending;
break;
}
EH_LOG((LL_INFO100, " RESUMEFRAME: no extra unwinding required, real resume frame: %p\n", sf.SP));
// Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler
// that decided to handle the exception. We may need it
// if a ThreadAbort is raised after the catch block
// executes.
m_dwIndexClauseForCatch = i + 1;
m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame;
#ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD);
#else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP;
#endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
//
// END resume frame code
//
ReturnStatus = FirstPassComplete;
break;
}
else
{
EH_LOG((LL_INFO100, " found finally/fault at 0x%p\n", dwHandlerStartPC));
_ASSERTE(fTermHandler);
// @todo : If user code throws a StackOverflowException and we have plenty of stack,
// we probably don't want to be so strict in not calling handlers.
if (!IsStackOverflowException())
{
DWORD_PTR dwStatus;
// for finally clauses
SetEnclosingClauseInfo(fIsFunclet,
pcfThisFrame->GetRelOffset(),
GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
// We have switched to indefinite COOP mode just before this loop started.
// Since we also forbid GC during second pass, disable it now since
// invocation of managed code can result in a GC.
ENDFORBIDGC();
dwStatus = CallHandler(dwHandlerStartPC, sf, &EHClause, pMD, FaultFinally X86_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext) ARM_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext) ARM64_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext));
// Once we return from a funclet, forbid GC again (refer to comment before start of the loop for details)
BEGINFORBIDGC();
}
else
{
EH_LOG((LL_INFO100, " STACKOVERFLOW: finally not called due to lack of guard page\n"));
// continue search
}
//
// will continue to find next fault/finally in this call frame
//
}
} // if fFoundHandler
} // if clause covers PC
} // foreach eh clause
} // if stack frame is far enough away from guard page
//
// notify the profiler
//
if (fGiveDebuggerAndProfilerNotification)
{
if (fIsFirstPass)
{
if (!fUnwindingToFindResumeFrame)
{
EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pMD);
}
}
else
{
if (!fUnwindFinished)
{
EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pMD);
}
}
}
} // fIgnoreThisFrame
lExit:
return ReturnStatus;
}
// <64bit_And_Arm_Specific>
// For funclets, add support for unwinding frame chain during SO. These definitions will be automatically picked up by
// BEGIN_SO_TOLERANT_CODE/END_SO_TOLERANT_CODE usage in ExceptionTracker::CallHandler below.
//
// This is required since funclet invocation is the only case of calling managed code from VM that is not wrapped by
// assembly helper with associated personality routine. The personality routine will invoke CleanupForSecondPass to
// release exception trackers and unwind frame chain.
//
// We need to do the same work as CleanupForSecondPass for funclet invocation in the face of SO. Thus, we redefine OPTIONAL_SO_CLEANUP_UNWIND
// below. This will perform frame chain unwind inside the "__finally" block that is part of the END_SO_TOLERANT_CODE macro only in the face
// of an SO.
//
// The second part of work, releasing exception trackers, is done inside the "__except" block also part of the END_SO_TOLERANT_CODE by invoking
// ClearExceptionStateAfterSO.
//
// </64bit_And_Arm_Specific>
#undef OPTIONAL_SO_CLEANUP_UNWIND
#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) if (pThread->GetFrame() < pFrame) { UnwindFrameChain(pThread, pFrame); }
typedef DWORD_PTR (HandlerFn)(UINT_PTR uStackFrame, Object* pExceptionObj);
#ifdef USE_FUNCLET_CALL_HELPER
// This is an assembly helper that enables us to call into EH funclets.
EXTERN_C DWORD_PTR STDCALL CallEHFunclet(Object *pThrowable, UINT_PTR pFuncletToInvoke, UINT_PTR *pFirstNonVolReg, UINT_PTR *pFuncletCallerSP);
// This is an assembly helper that enables us to call into EH filter funclets.
EXTERN_C DWORD_PTR STDCALL CallEHFilterFunclet(Object *pThrowable, TADDR CallerSP, UINT_PTR pFuncletToInvoke, UINT_PTR *pFuncletCallerSP);
// Converts a funclet entry point into the UINT_PTR form expected by the funclet
// call helpers, applying any target-specific encoding of the code address.
static inline UINT_PTR CastHandlerFn(HandlerFn *pfnHandler)
{
#ifndef _TARGET_ARM_
    // On non-ARM targets the entry point is used as-is.
    return (UINT_PTR)pfnHandler;
#else
    // On ARM, encode the pointer as a Thumb code address (low bit set).
    return DataPointerToThumbCode<UINT_PTR, HandlerFn *>(pfnHandler);
#endif
}
// Returns the address of the first non-volatile (callee-saved) register field
// inside the given CONTEXT. The funclet call helper uses this location to find
// the preserved register values for the frame being handled.
static inline UINT_PTR *GetFirstNonVolatileRegisterAddress(PCONTEXT pContextRecord)
{
#if defined(_TARGET_X86_)
    return (UINT_PTR*)&(pContextRecord->Edi);
#elif defined(_TARGET_ARM_)
    return (UINT_PTR*)&(pContextRecord->R4);
#elif defined(_TARGET_ARM64_)
    return (UINT_PTR*)&(pContextRecord->X19);
#else
    PORTABILITY_ASSERT("GetFirstNonVolatileRegisterAddress");
    return NULL;
#endif
}
// Returns the base address the filter funclet uses to rediscover the parent
// method's frame: EBP on x86, and the SP captured in the context on ARM/ARM64.
static inline TADDR GetFrameRestoreBase(PCONTEXT pContextRecord)
{
#if defined(_TARGET_X86_)
    return pContextRecord->Ebp;
#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
    return GetSP(pContextRecord);
#else
    PORTABILITY_ASSERT("GetFrameRestoreBase");
    return NULL;
#endif
}
#endif // USE_FUNCLET_CALL_HELPER
// Invokes a single EH funclet (filter, fault/finally, or catch) for a managed frame.
//
// Arguments:
//    uHandlerStartPC - code address of the funclet to invoke
//    sf              - establisher StackFrame of the parent method; passed to the
//                      funclet directly when the call helper is not used
//    pEHClause       - the EH clause whose handler is being invoked
//    pMD             - MethodDesc of the method containing the clause
//    funcletType     - Filter, FaultFinally, or Catch; selects the ETW events and
//                      whether the throwable is passed to the funclet
//    pContextRecord  - (x86/ARM/ARM64 only) context used to locate the funclet's
//                      non-volatile registers / frame restore base
//
// Return Value:
//    The funclet's return value: the filter disposition for filters, or the value
//    used as the resume PC for catch funclets (unused for fault/finally).
//
// Must be called in cooperative GC mode (see STATIC_CONTRACT_MODE_COOPERATIVE).
DWORD_PTR ExceptionTracker::CallHandler(
UINT_PTR uHandlerStartPC,
StackFrame sf,
EE_ILEXCEPTION_CLAUSE* pEHClause,
MethodDesc* pMD,
EHFuncletType funcletType
X86_ARG(PCONTEXT pContextRecord)
ARM_ARG(PCONTEXT pContextRecord)
ARM64_ARG(PCONTEXT pContextRecord)
)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
DWORD_PTR dwResumePC;
OBJECTREF throwable;
HandlerFn* pfnHandler = (HandlerFn*)uHandlerStartPC;
EH_LOG((LL_INFO100, " calling handler at 0x%p, sp = 0x%p\n", uHandlerStartPC, sf.SP));
Thread* pThread = GetThread();
// The first parameter specifies whether we want to make callbacks before (true) or after (false)
// calling the handler.
MakeCallbacksRelatedToHandler(true, pThread, pMD, pEHClause, uHandlerStartPC, sf);
_ASSERTE(pThread->DetermineIfGuardPagePresent());
// Fetch the exception object to hand to the funclet, unwrapping it if needed
// (e.g. when the throwable is wrapped for non-Exception-derived objects).
throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pMD->GetAssembly());
// We probe for stack space before attempting to call a filter, finally, or catch clause. The path from
// here to the actual managed code is very short. We must probe, however, because the JIT does not generate a
// probe for us upon entry to the handler. This probe ensures we have enough stack space to actually make it
// into the managed code.
//
// Incase a SO happens, this macro will also unwind the frame chain before continuing to dispatch the SO
// upstack (look at the macro implementation for details).
BEGIN_SO_TOLERANT_CODE(pThread);
// Stores the current SP and BSP, which will be the caller SP and BSP for the funclet.
// Note that we are making the assumption here that the SP and BSP don't change from this point
// forward until we actually make the call to the funclet. If it's not the case then we will need
// some sort of assembly wrappers to help us out.
CallerStackFrame csfFunclet = CallerStackFrame((UINT_PTR)GetCurrentSP());
this->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
this->m_EHClauseInfo.SetCallerStackFrame(csfFunclet);
// Emit the ETW "begin" event matching the funclet kind being invoked.
switch(funcletType)
{
case EHFuncletType::Filter:
ETW::ExceptionLog::ExceptionFilterBegin(pMD, (PVOID)uHandlerStartPC);
break;
case EHFuncletType::FaultFinally:
ETW::ExceptionLog::ExceptionFinallyBegin(pMD, (PVOID)uHandlerStartPC);
break;
case EHFuncletType::Catch:
ETW::ExceptionLog::ExceptionCatchBegin(pMD, (PVOID)uHandlerStartPC);
break;
}
#ifdef USE_FUNCLET_CALL_HELPER
// Invoke the funclet. We pass throwable only when invoking the catch block.
// Since the actual caller of the funclet is the assembly helper, pass the reference
// to the CallerStackFrame instance so that it can be updated.
CallerStackFrame* pCallerStackFrame = this->m_EHClauseInfo.GetCallerStackFrameForEHClauseReference();
UINT_PTR *pFuncletCallerSP = &(pCallerStackFrame->SP);
if (funcletType != EHFuncletType::Filter)
{
dwResumePC = CallEHFunclet((funcletType == EHFuncletType::Catch)?OBJECTREFToObject(throwable):(Object *)NULL,
CastHandlerFn(pfnHandler),
GetFirstNonVolatileRegisterAddress(pContextRecord),
pFuncletCallerSP);
}
else
{
// For invoking IL filter funclet, we pass the CallerSP to the funclet using which
// it will retrieve the framepointer for accessing the locals in the parent
// method.
dwResumePC = CallEHFilterFunclet(OBJECTREFToObject(throwable),
GetFrameRestoreBase(pContextRecord),
CastHandlerFn(pfnHandler),
pFuncletCallerSP);
}
#else // USE_FUNCLET_CALL_HELPER
//
// Invoke the funclet.
//
dwResumePC = pfnHandler(sf.SP, OBJECTREFToObject(throwable));
#endif // !USE_FUNCLET_CALL_HELPER
// Emit the matching ETW "end" event; a completed catch also ends the throw.
switch(funcletType)
{
case EHFuncletType::Filter:
ETW::ExceptionLog::ExceptionFilterEnd();
break;
case EHFuncletType::FaultFinally:
ETW::ExceptionLog::ExceptionFinallyEnd();
break;
case EHFuncletType::Catch:
ETW::ExceptionLog::ExceptionCatchEnd();
ETW::ExceptionLog::ExceptionThrownEnd();
break;
}
this->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
END_SO_TOLERANT_CODE;
// The first parameter specifies whether we want to make callbacks before (true) or after (false)
// calling the handler.
MakeCallbacksRelatedToHandler(false, pThread, pMD, pEHClause, uHandlerStartPC, sf);
return dwResumePC;
}
#undef OPTIONAL_SO_CLEANUP_UNWIND
#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame)
//
// this must be done after the second pass has run, it does not
// reference anything on the stack, so it is safe to run in an
// SEH __except clause as well as a C++ catch clause.
//
// static
void ExceptionTracker::PopTrackers(
void* pStackFrameSP
)
{
CONTRACTL
{
MODE_ANY;
GC_NOTRIGGER;
NOTHROW;
}
CONTRACTL_END;
StackFrame sf((UINT_PTR)pStackFrameSP);
// Only call into PopTrackers if we have a managed thread and we have an exception progress.
// Otherwise, the call below (to PopTrackers) is a noop. If this ever changes, then this short-circuit needs to be fixed.
Thread *pCurThread = GetThread();
if ((pCurThread != NULL) && (pCurThread->GetExceptionState()->IsExceptionInProgress()))
{
// Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
// for details on the usage of this COOP switch.
GCX_COOP();
PopTrackers(sf, false);
}
}
//
// during the second pass, an exception might escape out to
// unmanaged code where it is swallowed (or potentially rethrown).
// The current tracker is abandoned in this case, and if a rethrow
// does happen in unmanaged code, this is unfortunately treated as
// a brand new exception. This is unavoidable because if two
// exceptions escape out to unmanaged code in this manner, a subsequent
// rethrow cannot be disambiguated as corresponding to the nested vs.
// the original exception.
// Pops the current exception tracker if the exception is escaping out of managed
// code with no managed resume frame (i.e. it is propagating into unmanaged code
// or going unhandled). If the exception will resume in managed code further up
// the stack, the tracker is still needed and is left alone.
//
// Arguments:
//    pStackPointer - SP at the point of escape; used as the pop boundary.
void ExceptionTracker::PopTrackerIfEscaping(
    void* pStackPointer
    )
{
    CONTRACTL
    {
        MODE_ANY;
        GC_NOTRIGGER;
        NOTHROW;
    }
    CONTRACTL_END;

    Thread*                 pThread  = GetThread();
    ThreadExceptionState*   pExState = pThread->GetExceptionState();
    ExceptionTracker*       pTracker = pExState->m_pCurrentTracker;
    CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid());

    // If we are resuming in managed code (albeit further up the stack) we will still need this
    // tracker. Otherwise we are either propagating into unmanaged code -- with the rethrow
    // issues mentioned above -- or we are going unhandled.
    //
    // Note that we don't distinguish unmanaged code in the EE vs. unmanaged code outside the
    // EE. We could use the types of the Frames above us to make this distinction. Without
    // this, the technique of EX_TRY/EX_CATCH/EX_RETHROW inside the EE will lose its tracker
    // and have to rely on LastThrownObject in the rethrow. Along the same lines, unhandled
    // exceptions only have access to LastThrownObject.
    //
    // There may not be a current tracker if, for instance, UMThunk has dispatched into managed
    // code via CallDescr. In that case, CallDescr may pop the tracker, leaving UMThunk with
    // nothing to do.
    if (pTracker && pTracker->m_sfResumeStackFrame.IsNull())
    {
        // NOTE: a dead local that captured GetTopmostStackFrameFromFirstPass() was
        // removed here; it was assigned but never read.
        StackFrame sf((UINT_PTR)pStackPointer);

        // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
        // for details on the usage of this COOP switch.
        GCX_COOP();

        ExceptionTracker::PopTrackers(sf, true);
    }
}
//
// static
// Worker overload: unlinks and frees exception trackers whose scanned stack range
// lies at/below the given resume frame.
//
// Arguments:
//    sfResumeFrame  - boundary frame; trackers strictly below it are popped
//    fPopWhenEqual  - if true, a tracker whose upper bound equals sfResumeFrame
//                     is popped as well
//
// Must run in cooperative GC mode. No-op on threads without a managed Thread
// object (callers short-circuit; see the NOTE below).
void ExceptionTracker::PopTrackers(
StackFrame sfResumeFrame,
bool fPopWhenEqual
)
{
CONTRACTL
{
// Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
// for details on the mode being COOP here.
MODE_COOPERATIVE;
GC_NOTRIGGER;
NOTHROW;
}
CONTRACTL_END;
Thread* pThread = GetThread();
ExceptionTracker* pTracker = (pThread ? pThread->GetExceptionState()->m_pCurrentTracker : NULL);
// NOTE:
//
// This method is a no-op when there is no managed Thread object. We detect such a case and short circuit out in ExceptionTrackers::PopTrackers.
// If this ever changes, then please revisit that method and fix it up appropriately.
// If this tracker does not have valid stack ranges and it is in the first pass,
// then we came here likely when the tracker was being setup
// and an exception took place.
//
// In such a case, we will not pop off the tracker
if (pTracker && pTracker->m_ScannedStackRange.IsEmpty() && pTracker->IsInFirstPass())
{
// skip any others with empty ranges...
do
{
pTracker = pTracker->m_pPrevNestedInfo;
}
while (pTracker && pTracker->m_ScannedStackRange.IsEmpty());
// pTracker is now the first non-empty one, make sure it doesn't need popping
// if it does, then someone let an exception propagate out of the exception dispatch code
_ASSERTE(!pTracker || (pTracker->m_ScannedStackRange.GetUpperBound() > sfResumeFrame));
return;
}
#if defined(DEBUGGING_SUPPORTED)
DWORD_PTR dwInterceptStackFrame = 0;
// This method may be called on an unmanaged thread, in which case no interception can be done.
if (pTracker)
{
ThreadExceptionState* pExState = pThread->GetExceptionState();
// If the exception is intercepted, then pop trackers according to the stack frame at which
// the exception is intercepted. We must retrieve the frame pointer before we start popping trackers.
if (pExState->GetFlags()->DebuggerInterceptInfo())
{
pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&dwInterceptStackFrame,
NULL, NULL);
}
}
#endif // DEBUGGING_SUPPORTED
// Walk the nested-tracker list from innermost outward, popping every tracker
// whose scanned range falls at/below the resume frame.
while (pTracker)
{
#ifndef FEATURE_PAL
// When we are about to pop off a tracker, it should
// have a stack range setup.
// It is not true on PAL where the scanned stack range needs to
// be reset after unwinding a sequence of native frames.
_ASSERTE(!pTracker->m_ScannedStackRange.IsEmpty());
#endif // FEATURE_PAL
ExceptionTracker* pPrev = pTracker->m_pPrevNestedInfo;
// <TODO>
// with new tracker collapsing code, we will only ever pop one of these at a time
// at the end of the 2nd pass. However, CLRException::HandlerState::SetupCatch
// still uses this function and we still need to revisit how it interacts with
// ExceptionTrackers
// </TODO>
if ((fPopWhenEqual && (pTracker->m_ScannedStackRange.GetUpperBound() == sfResumeFrame)) ||
(pTracker->m_ScannedStackRange.GetUpperBound() < sfResumeFrame))
{
#if defined(DEBUGGING_SUPPORTED)
if (g_pDebugInterface != NULL)
{
// Delete the debugger's intercept context for trackers being popped below
// the interception point; otherwise assert the interception lines up.
if (pTracker->m_ScannedStackRange.GetUpperBound().SP < dwInterceptStackFrame)
{
g_pDebugInterface->DeleteInterceptContext(pTracker->m_DebuggerExState.GetDebuggerInterceptContext());
}
else
{
_ASSERTE(dwInterceptStackFrame == 0 ||
( dwInterceptStackFrame == sfResumeFrame.SP &&
dwInterceptStackFrame == pTracker->m_ScannedStackRange.GetUpperBound().SP ));
}
}
#endif // DEBUGGING_SUPPORTED
ExceptionTracker* pTrackerToFree = pTracker;
EH_LOG((LL_INFO100, "Unlinking ExceptionTracker object 0x%p, thread = 0x%p\n", pTrackerToFree, pTrackerToFree->m_pThread));
CONSISTENCY_CHECK(pTracker->IsValid());
pTracker = pPrev;
// free managed tracker resources causing notification -- do this before unlinking the tracker
// this is necessary so that we know an exception is still in flight while we give the notification
FreeTrackerMemory(pTrackerToFree, memManaged);
// unlink the tracker from the thread
pThread->GetExceptionState()->m_pCurrentTracker = pTracker;
CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid());
// free unmanaged tracker resources
FreeTrackerMemory(pTrackerToFree, memUnmanaged);
}
else
{
// Trackers are ordered; once one is above the boundary, stop.
break;
}
}
}
//
// static
ExceptionTracker* ExceptionTracker::GetOrCreateTracker(
UINT_PTR ControlPc,
StackFrame sf,
EXCEPTION_RECORD* pExceptionRecord,
CONTEXT* pContextRecord,
BOOL bAsynchronousThreadStop,
bool fIsFirstPass,
StackTraceState* pStackTraceState
)
{
CONTRACT(ExceptionTracker*)
{
MODE_ANY;
GC_TRIGGERS;
NOTHROW;
PRECONDITION(CheckPointer(pStackTraceState));
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
Thread* pThread = GetThread();
ThreadExceptionState* pExState = pThread->GetExceptionState();
ExceptionTracker* pTracker = pExState->m_pCurrentTracker;
CONSISTENCY_CHECK((NULL == pTracker) || (pTracker->IsValid()));
bool fCreateNewTracker = false;
bool fIsRethrow = false;
bool fTransitionFromSecondToFirstPass = false;
// Initialize the out parameter.
*pStackTraceState = STS_Append;
if (NULL != pTracker)
{
fTransitionFromSecondToFirstPass = fIsFirstPass && !pTracker->IsInFirstPass();
#ifndef FEATURE_PAL
// We don't check this on PAL where the scanned stack range needs to
// be reset after unwinding a sequence of native frames.
CONSISTENCY_CHECK(!pTracker->m_ScannedStackRange.IsEmpty());
#endif // FEATURE_PAL
if (pTracker->m_ExceptionFlags.IsRethrown())
{
EH_LOG((LL_INFO100, ">>continued processing of RETHROWN exception\n"));