// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
//
/* EXCEP.CPP:
*
*/
#include "common.h"
#include "frames.h"
#include "threads.h"
#include "excep.h"
#include "object.h"
#include "field.h"
#include "dbginterface.h"
#include "cgensys.h"
#include "comutilnative.h"
#include "siginfo.hpp"
#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in RealCOMPlusThrow
#include "perfcounters.h"
#include "dllimportcallback.h"
#include "stackwalk.h" //for CrawlFrame, in SetIPFromSrcToDst
#include "shimload.h"
#include "eeconfig.h"
#include "virtualcallstub.h"
#include "typestring.h"
#ifndef FEATURE_PAL
#include "dwreport.h"
#endif // !FEATURE_PAL
#include "eventreporter.h"
#ifdef FEATURE_COMINTEROP
#include <roerrorapi.h>
#endif
#ifdef WIN64EXCEPTIONS
#include "exceptionhandling.h"
#endif
#include <errorrep.h>
#ifndef FEATURE_PAL
// Include definition of GenericModeBlock
#include <msodw.h>
#endif // !FEATURE_PAL
// Support for extracting MethodDesc of a delegate.
#include "comdelegate.h"
#ifndef FEATURE_PAL
// Windows uses 64kB as the null-reference area
#define NULL_AREA_SIZE (64 * 1024)
#else // !FEATURE_PAL
#define NULL_AREA_SIZE GetOsPageSize()
#endif // !FEATURE_PAL
#ifndef CROSSGEN_COMPILE
BOOL IsIPInEE(void *ip);
//----------------------------------------------------------------------------
//
// IsExceptionFromManagedCode - determine if pExceptionRecord points to a managed exception
//
// Arguments:
// pExceptionRecord - pointer to exception record
//
// Return Value:
// TRUE or FALSE
//
//----------------------------------------------------------------------------
BOOL IsExceptionFromManagedCode(const EXCEPTION_RECORD * pExceptionRecord)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(CheckPointer(pExceptionRecord));
} CONTRACTL_END;
if (pExceptionRecord == NULL)
{
return FALSE;
}
DACCOP_IGNORE(FieldAccess, "EXCEPTION_RECORD is a OS structure, and ExceptionAddress is actually a target address here.");
UINT_PTR address = reinterpret_cast<UINT_PTR>(pExceptionRecord->ExceptionAddress);
// An exception code of EXCEPTION_COMPLUS indicates a managed exception
// has occurred (most likely due to executing a "throw" instruction).
//
// Also, a hardware level exception may not have an exception code of
// EXCEPTION_COMPLUS. In this case, an exception address that resides in
// managed code indicates a managed exception has occurred.
return (IsComPlusException(pExceptionRecord) ||
(ExecutionManager::IsManagedCode((PCODE)address)));
}
#ifndef DACCESS_COMPILE
#define SZ_UNHANDLED_EXCEPTION W("Unhandled Exception:")
#define SZ_UNHANDLED_EXCEPTION_CHARLEN ((sizeof(SZ_UNHANDLED_EXCEPTION) / sizeof(WCHAR)))
typedef struct {
OBJECTREF pThrowable;
STRINGREF s1;
OBJECTREF pTmpThrowable;
} ProtectArgsStruct;
PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord();
BOOL IsUnmanagedToManagedSEHHandler(EXCEPTION_REGISTRATION_RECORD*);
VOID DECLSPEC_NORETURN RealCOMPlusThrow(OBJECTREF throwable, BOOL rethrow
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, CorruptionSeverity severity = NotCorrupting
#endif // FEATURE_CORRUPTING_EXCEPTIONS
);
//-------------------------------------------------------------------------------
// Basically, this asks whether the exception is a managed exception thrown by
// this instance of the CLR.
//
// The way the result is used, however, is to decide whether this instance is the
// one to throw up the Watson box.
//-------------------------------------------------------------------------------
BOOL ShouldOurUEFDisplayUI(PEXCEPTION_POINTERS pExceptionInfo)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
// Test first for the canned SO EXCEPTION_POINTERS structure as it has a NULL context record and will break the code below.
extern EXCEPTION_POINTERS g_SOExceptionPointers;
if (pExceptionInfo == &g_SOExceptionPointers)
{
return TRUE;
}
return IsComPlusException(pExceptionInfo->ExceptionRecord) || ExecutionManager::IsManagedCode(GetIP(pExceptionInfo->ContextRecord));
}
BOOL NotifyAppDomainsOfUnhandledException(
PEXCEPTION_POINTERS pExceptionPointers,
OBJECTREF *pThrowableIn,
BOOL useLastThrownObject,
BOOL isTerminating);
VOID SetManagedUnhandledExceptionBit(
BOOL useLastThrownObject);
void COMPlusThrowBoot(HRESULT hr)
{
STATIC_CONTRACT_THROWS;
_ASSERTE(g_fEEShutDown >= ShutDown_Finalize2 || !"This should not be called unless we are in the last phase of shutdown!");
ULONG_PTR arg = hr;
RaiseException(BOOTUP_EXCEPTION_COMPLUS, EXCEPTION_NONCONTINUABLE, 1, &arg);
}
//-------------------------------------------------------------------------------
// This simply tests to see if the exception object is a subclass of
// the discriminating class specified in the exception clause.
//-------------------------------------------------------------------------------
BOOL ExceptionIsOfRightType(TypeHandle clauseType, TypeHandle thrownType)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
// If the clause type was never resolved, it was never loaded and so could not have been thrown.
if (clauseType.IsNull())
return FALSE;
if (clauseType == thrownType)
return TRUE;
// now look for parent match
TypeHandle superType = thrownType;
while (!superType.IsNull()) {
if (superType == clauseType) {
break;
}
superType = superType.GetParent();
}
return !superType.IsNull();
}
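// Illustrative sketch (kept out of the build; variable names are hypothetical):
// assuming the clause's class token has already been resolved to a TypeHandle, a
// dispatcher would use the parent walk above to decide whether a catch clause
// matches the thrown object. GetTypeHandle() on the thrown object is assumed here.
#if 0
TypeHandle thClause; // resolved from the clause's class token
TypeHandle thThrown = orThrowable->GetTypeHandle(); // type of the thrown object
if (ExceptionIsOfRightType(thClause, thThrown))
{
// thThrown is the clause type itself or one of its subclasses; run this handler.
}
#endif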
//===========================================================================
// Gets the message text from an exception
//===========================================================================
ULONG GetExceptionMessage(OBJECTREF throwable,
__inout_ecount(bufferLength) LPWSTR buffer,
ULONG bufferLength)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
// Prefast buffer sanity check. Don't call the API with a zero length buffer.
if (bufferLength == 0)
{
_ASSERTE(bufferLength > 0);
return 0;
}
StackSString result;
GetExceptionMessage(throwable, result);
ULONG length = result.GetCount();
LPCWSTR chars = result.GetUnicode();
if (length < bufferLength)
{
wcsncpy_s(buffer, bufferLength, chars, length);
}
else
{
wcsncpy_s(buffer, bufferLength, chars, bufferLength-1);
}
return length;
}
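// Usage sketch (illustration only, kept out of the build; the buffer name and size
// are hypothetical): the return value is the full message length even when the
// buffer is too small, so a caller can detect truncation by comparing it against
// the buffer size. The buffer is always null-terminated.
#if 0
WCHAR wszMessage[256];
ULONG cchFull = GetExceptionMessage(orThrowable, wszMessage, _countof(wszMessage));
if (cchFull >= _countof(wszMessage))
{
// wszMessage holds a truncated, null-terminated prefix of the full message.
}
#endif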
//-----------------------------------------------------------------------------
// Given an object, get the "message" from it. If the object is an Exception
// call Exception.InternalToString, otherwise, call Object.ToString
//-----------------------------------------------------------------------------
void GetExceptionMessage(OBJECTREF throwable, SString &result)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
STRINGREF pString = GetExceptionMessage(throwable);
// If the call returned NULL (as opposed to an empty string), there is no message.
if (pString != NULL)
pString->GetSString(result);
} // void GetExceptionMessage()
#ifdef FEATURE_COMINTEROP
// This method returns IRestrictedErrorInfo associated with the ErrorObject.
// It checks whether the given managed exception object has __HasRestrictedLanguageErrorObject set
// in which case it returns the IRestrictedErrorInfo associated with the __RestrictedErrorObject.
IRestrictedErrorInfo* GetRestrictedErrorInfoFromErrorObject(OBJECTREF throwable)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
IRestrictedErrorInfo* pRestrictedErrorInfo = NULL;
// If there is no object, there is no restricted error.
if (throwable == NULL)
return NULL;
_ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
if (!IsException(throwable->GetMethodTable()))
{
return NULL;
}
struct _gc {
OBJECTREF Throwable;
OBJECTREF RestrictedErrorInfoObjRef;
} gc;
ZeroMemory(&gc, sizeof(gc));
GCPROTECT_BEGIN(gc);
gc.Throwable = throwable;
// Get the MethodDesc on which we'll call.
MethodDescCallSite getRestrictedLanguageErrorObject(METHOD__EXCEPTION__TRY_GET_RESTRICTED_LANGUAGE_ERROR_OBJECT, &gc.Throwable);
// Make the call.
ARG_SLOT Args[] =
{
ObjToArgSlot(gc.Throwable),
PtrToArgSlot(&gc.RestrictedErrorInfoObjRef)
};
BOOL bHasLanguageRestrictedErrorObject = (BOOL)getRestrictedLanguageErrorObject.Call_RetBool(Args);
if(bHasLanguageRestrictedErrorObject)
{
// The __RestrictedErrorObject represents the IRestrictedErrorInfo RCW of a non-CLR platform. Let's get the corresponding IRestrictedErrorInfo for it.
pRestrictedErrorInfo = (IRestrictedErrorInfo *)GetComIPFromObjectRef(&gc.RestrictedErrorInfoObjRef, IID_IRestrictedErrorInfo);
}
GCPROTECT_END();
return pRestrictedErrorInfo;
}
#endif // FEATURE_COMINTEROP
STRINGREF GetExceptionMessage(OBJECTREF throwable)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
// If there is no object, there is no message.
if (throwable == NULL)
return NULL;
// Assume we're calling Exception.InternalToString() ...
BinderMethodID sigID = METHOD__EXCEPTION__INTERNAL_TO_STRING;
// ... but if it isn't an exception, call Object.ToString().
_ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
if (!IsException(throwable->GetMethodTable()))
{
sigID = METHOD__OBJECT__TO_STRING;
}
// Return value.
STRINGREF pString = NULL;
GCPROTECT_BEGIN(throwable);
// Get the MethodDesc on which we'll call.
MethodDescCallSite toString(sigID, &throwable);
// Make the call.
ARG_SLOT arg[1] = {ObjToArgSlot(throwable)};
pString = toString.Call_RetSTRINGREF(arg);
GCPROTECT_END();
return pString;
}
HRESULT GetExceptionHResult(OBJECTREF throwable)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = E_FAIL;
if (throwable == NULL)
return hr;
// Since any object can be thrown in managed code, not only instances of System.Exception subclasses,
// we need to check whether we are dealing with an exception before attempting to retrieve
// the HRESULT field. If we are not dealing with an exception, we simply return E_FAIL.
_ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
if (IsException(throwable->GetMethodTable()))
{
hr = ((EXCEPTIONREF)throwable)->GetHResult();
}
return hr;
} // HRESULT GetExceptionHResult()
DWORD GetExceptionXCode(OBJECTREF throwable)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = E_FAIL;
if (throwable == NULL)
return hr;
// Since any object can be thrown in managed code, not only instances of System.Exception subclasses,
// we need to check whether we are dealing with an exception before attempting to retrieve
// the XCode field. If we are not dealing with an exception, we simply return E_FAIL.
_ASSERTE(IsException(throwable->GetMethodTable())); // what is the pathway here?
if (IsException(throwable->GetMethodTable()))
{
hr = ((EXCEPTIONREF)throwable)->GetXCode();
}
return hr;
} // DWORD GetExceptionXCode()
//------------------------------------------------------------------------------
// This function will extract some information from an Access Violation SEH
// exception, and store it in the System.AccessViolationException object.
// - the faulting instruction's IP.
// - the target address of the faulting instruction.
// - a code indicating attempted read vs write
//------------------------------------------------------------------------------
void SetExceptionAVParameters( // No return.
OBJECTREF throwable, // The object into which to set the values.
EXCEPTION_RECORD *pExceptionRecord) // The SEH exception information.
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(throwable != NULL);
}
CONTRACTL_END;
GCPROTECT_BEGIN(throwable)
{
// This should only be called for AccessViolationException
_ASSERTE(MscorlibBinder::GetException(kAccessViolationException) == throwable->GetMethodTable());
FieldDesc *pFD_ip = MscorlibBinder::GetField(FIELD__ACCESS_VIOLATION_EXCEPTION__IP);
FieldDesc *pFD_target = MscorlibBinder::GetField(FIELD__ACCESS_VIOLATION_EXCEPTION__TARGET);
FieldDesc *pFD_access = MscorlibBinder::GetField(FIELD__ACCESS_VIOLATION_EXCEPTION__ACCESSTYPE);
_ASSERTE(pFD_ip->GetFieldType() == ELEMENT_TYPE_I);
_ASSERTE(pFD_target->GetFieldType() == ELEMENT_TYPE_I);
_ASSERTE(pFD_access->GetFieldType() == ELEMENT_TYPE_I4);
void *ip = pExceptionRecord->ExceptionAddress;
void *target = (void*)(pExceptionRecord->ExceptionInformation[1]);
DWORD access = (DWORD)(pExceptionRecord->ExceptionInformation[0]);
pFD_ip->SetValuePtr(throwable, ip);
pFD_target->SetValuePtr(throwable, target);
pFD_access->SetValue32(throwable, access);
}
GCPROTECT_END();
} // void SetExceptionAVParameters()
//------------------------------------------------------------------------------
// This will call InternalPreserveStackTrace (if the throwable derives from
// System.Exception), to copy the stack trace to the _remoteStackTraceString.
// Doing so allows the stack trace of an exception caught by the runtime, and
// rethrown with COMPlusThrow(OBJECTREF throwable), to be preserved. Otherwise
// the exception handling code may clear the stack trace. (Generally, we see
// the stack trace preserved on win32 and cleared on win64.)
//------------------------------------------------------------------------------
void ExceptionPreserveStackTrace( // No return.
OBJECTREF throwable) // Object about to be thrown.
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
// If there is no object, there is no stack trace to save.
if (throwable == NULL)
return;
GCPROTECT_BEGIN(throwable);
// Make sure it is derived from System.Exception, that it is not one of the
// preallocated exception objects, and that it has a stack trace to save.
if (IsException(throwable->GetMethodTable()) &&
!CLRException::IsPreallocatedExceptionObject(throwable))
{
LOG((LF_EH, LL_INFO1000, "ExceptionPreserveStackTrace called\n"));
// We're calling Exception.InternalPreserveStackTrace() ...
BinderMethodID sigID = METHOD__EXCEPTION__INTERNAL_PRESERVE_STACK_TRACE;
// Get the MethodDesc on which we'll call.
MethodDescCallSite preserveStackTrace(sigID, &throwable);
// Make the call.
ARG_SLOT arg[1] = {ObjToArgSlot(throwable)};
preserveStackTrace.Call(arg);
}
GCPROTECT_END();
} // void ExceptionPreserveStackTrace()
// We have to cache the MethodTable and FieldDesc for wrapped non-compliant exceptions the first
// time we wrap, because we cannot tolerate a GC when it comes time to detect and unwrap one.
static MethodTable *pMT_RuntimeWrappedException;
static FieldDesc *pFD_WrappedException;
// Non-compliant exceptions are immediately wrapped in a RuntimeWrappedException instance. The entire
// exception system can now ignore the possibility of these cases except:
//
// 1) IL_Throw, which must wrap via this API
// 2) Calls to Filters & Catch handlers, which must unwrap based on whether the assembly is on the legacy
// plan.
//
void WrapNonCompliantException(OBJECTREF *ppThrowable)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(IsProtectedByGCFrame(ppThrowable));
}
CONTRACTL_END;
_ASSERTE(!IsException((*ppThrowable)->GetMethodTable()));
EX_TRY
{
// idempotent operations, so the race condition is okay.
if (pMT_RuntimeWrappedException == NULL)
pMT_RuntimeWrappedException = MscorlibBinder::GetException(kRuntimeWrappedException);
if (pFD_WrappedException == NULL)
pFD_WrappedException = MscorlibBinder::GetField(FIELD__RUNTIME_WRAPPED_EXCEPTION__WRAPPED_EXCEPTION);
OBJECTREF orWrapper = AllocateObject(MscorlibBinder::GetException(kRuntimeWrappedException));
GCPROTECT_BEGIN(orWrapper);
MethodDescCallSite ctor(METHOD__RUNTIME_WRAPPED_EXCEPTION__OBJ_CTOR, &orWrapper);
ARG_SLOT args[] =
{
ObjToArgSlot(orWrapper),
ObjToArgSlot(*ppThrowable)
};
ctor.Call(args);
*ppThrowable = orWrapper;
GCPROTECT_END();
}
EX_CATCH
{
// If we took an exception while binding, or running the constructor of the RuntimeWrappedException
// instance, we know that this new exception is CLS compliant. In fact, it's likely to be
// OutOfMemoryException, StackOverflowException or ThreadAbortException.
OBJECTREF orReplacement = GET_THROWABLE();
_ASSERTE(IsException(orReplacement->GetMethodTable()));
*ppThrowable = orReplacement;
} EX_END_CATCH(SwallowAllExceptions);
}
// Before presenting an exception object to a handler (filter or catch, not finally or fault), it
// may be necessary to turn it back into a non-compliant exception. This is conditioned on an
// assembly level setting.
OBJECTREF PossiblyUnwrapThrowable(OBJECTREF throwable, Assembly *pAssembly)
{
// Check if we are required to compute the RuntimeWrapExceptions status.
BOOL fIsRuntimeWrappedException = ((throwable != NULL) && (throwable->GetMethodTable() == pMT_RuntimeWrappedException));
BOOL fRequiresComputingRuntimeWrapExceptionsStatus = (fIsRuntimeWrappedException &&
(!(pAssembly->GetManifestModule()->IsRuntimeWrapExceptionsStatusComputed())));
CONTRACTL
{
THROWS;
// If we are required to compute the status of RuntimeWrapExceptions, then the operation could trigger a GC.
// Thus, conditionally setup the contract.
if (fRequiresComputingRuntimeWrapExceptionsStatus) GC_TRIGGERS; else GC_NOTRIGGER;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pAssembly));
}
CONTRACTL_END;
if (fIsRuntimeWrappedException && (!pAssembly->GetManifestModule()->IsRuntimeWrapExceptions()))
{
// We have already created the instance and fetched the field. We know it is
// not marshal-by-ref, nor any of the other cases that might trigger a GC.
ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
throwable = pFD_WrappedException->GetRefValue(throwable);
}
return throwable;
}
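// Minimal sketch of how the wrap/unwrap pair fits together (illustration only, not
// compiled; variable names are hypothetical and the throwable must be GC-protected):
// IL_Throw wraps any thrown object that is not a System.Exception, and a filter/catch
// dispatch site unwraps it again for assemblies that opted out of wrapping.
#if 0
// At throw time (see IL_Throw):
if (!IsException(orThrowable->GetMethodTable()))
WrapNonCompliantException(&orThrowable);

// At filter/catch dispatch time, per handling assembly:
OBJECTREF orForHandler = PossiblyUnwrapThrowable(orThrowable, pHandlerAssembly);
#endif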
// This is used by a holder in CreateTypeInitializationExceptionObject to
// reset the state as appropriate.
void ResetTypeInitializationExceptionState(BOOL isAlreadyCreating)
{
LIMITED_METHOD_CONTRACT;
if (!isAlreadyCreating)
GetThread()->ResetIsCreatingTypeInitException();
}
void CreateTypeInitializationExceptionObject(LPCWSTR pTypeThatFailed,
OBJECTREF *pInnerException,
OBJECTREF *pInitException,
OBJECTREF *pThrowable)
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pInnerException, NULL_OK));
PRECONDITION(CheckPointer(pInitException));
PRECONDITION(CheckPointer(pThrowable));
PRECONDITION(IsProtectedByGCFrame(pInnerException));
PRECONDITION(IsProtectedByGCFrame(pInitException));
PRECONDITION(IsProtectedByGCFrame(pThrowable));
PRECONDITION(CheckPointer(GetThread()));
} CONTRACTL_END;
Thread *pThread = GetThread();
*pThrowable = NULL;
// This holder makes sure the thread is put back into its original state if something
// throws out of this function (such as an OOM exception).
Holder< BOOL, DoNothing< BOOL >, ResetTypeInitializationExceptionState, FALSE, NoNull< BOOL > >
isAlreadyCreating(pThread->IsCreatingTypeInitException());
EX_TRY {
// This will contain the type of exception we want to create. Read comment below
// on why we'd want to create an exception other than TypeInitException
MethodTable *pMT;
BinderMethodID methodID;
// If we are already in the midst of creating a TypeInitializationException object,
// and we get here, it means there was an exception thrown while initializing the
// TypeInitializationException type itself, or one of the types used by its class
// constructor. In this case, we're going to back down and use a SystemException
// object in its place. It is *KNOWN* that both these exception types have identical
// .ctor sigs "void instance (string, exception)" so both can be used interchangeably
// in the code that follows.
if (!isAlreadyCreating.GetValue()) {
pThread->SetIsCreatingTypeInitException();
pMT = MscorlibBinder::GetException(kTypeInitializationException);
methodID = METHOD__TYPE_INIT_EXCEPTION__STR_EX_CTOR;
}
else {
// If we ever hit one of these asserts, then it is bad
// because we do not know what exception to return then.
_ASSERTE(pInnerException != NULL);
_ASSERTE(*pInnerException != NULL);
*pThrowable = *pInnerException;
*pInitException = *pInnerException;
goto ErrExit;
}
// Allocate the exception object
*pThrowable = AllocateObject(pMT);
MethodDescCallSite ctor(methodID, pThrowable);
// Since the inner exception object in the .ctor is of type Exception, make sure
// that the object we're passed in derives from Exception. If not, pass NULL.
BOOL isException = FALSE;
if (pInnerException != NULL)
isException = IsException((*pInnerException)->GetMethodTable());
_ASSERTE(isException); // What pathway can give us non-compliant exceptions?
STRINGREF sType = StringObject::NewString(pTypeThatFailed);
// If the inner object derives from exception, set it as the third argument.
ARG_SLOT args[] = { ObjToArgSlot(*pThrowable),
ObjToArgSlot(sType),
ObjToArgSlot(isException ? *pInnerException : NULL) };
// Call the .ctor
ctor.Call(args);
// On success, set the init exception.
*pInitException = *pThrowable;
}
EX_CATCH {
// If calling the constructor fails, then we'll call ourselves again, and this time
// through we will try and create an EEException object. If that fails, then the
// else block of this will be executed.
if (!isAlreadyCreating.GetValue()) {
CreateTypeInitializationExceptionObject(pTypeThatFailed, pInnerException, pInitException, pThrowable);
}
// If we were already in the middle of creating a type init
// exception when we were called, we would have tried to create an EEException instead
// of a TypeInitException.
else {
// If we're recursing, then we should be calling ourselves from DoRunClassInitThrowing,
// in which case we're guaranteed that we're passing in all three arguments.
*pInitException = pInnerException ? *pInnerException : NULL;
*pThrowable = GET_THROWABLE();
}
} EX_END_CATCH(SwallowAllExceptions);
CONSISTENCY_CHECK(*pInitException != NULL || !pInnerException);
ErrExit:
;
}
// ==========================================================================
// ComputeEnclosingHandlerNestingLevel
//
// This is code factored out of COMPlusThrowCallback to figure out
// what the number of nested exception handlers is.
// ==========================================================================
DWORD ComputeEnclosingHandlerNestingLevel(IJitManager *pIJM,
const METHODTOKEN& mdTok,
SIZE_T offsNat)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
// Determine the nesting level of EHClause. Just walk the table
// again, and find out how many handlers enclose it
DWORD nestingLevel = 0;
EH_CLAUSE_ENUMERATOR pEnumState;
unsigned EHCount = pIJM->InitializeEHEnumeration(mdTok, &pEnumState);
for (unsigned j=0; j<EHCount; j++)
{
EE_ILEXCEPTION_CLAUSE EHClause;
pIJM->GetNextEHClause(&pEnumState,&EHClause);
_ASSERTE(EHClause.HandlerEndPC != (DWORD) -1); // <TODO> remove, only protects against a deprecated convention</TODO>
if ((offsNat > EHClause.HandlerStartPC) &&
(offsNat < EHClause.HandlerEndPC))
{
nestingLevel++;
}
}
return nestingLevel;
}
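// Worked example (offsets are hypothetical): with two clauses whose handler regions
// are (0x10, 0x40) and (0x20, 0x30), a native offset of 0x25 lies strictly inside
// both handler regions and reports a nesting level of 2, while an offset of 0x35
// lies strictly inside only the outer one and reports 1. Offsets outside every
// handler report 0.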
// ******************************* EHRangeTreeNode ************************** //
EHRangeTreeNode::EHRangeTreeNode(void)
{
WRAPPER_NO_CONTRACT;
CommonCtor(0, false);
}
EHRangeTreeNode::EHRangeTreeNode(DWORD offset, bool fIsRange /* = false */)
{
WRAPPER_NO_CONTRACT;
CommonCtor(offset, fIsRange);
}
void EHRangeTreeNode::CommonCtor(DWORD offset, bool fIsRange)
{
LIMITED_METHOD_CONTRACT;
m_pTree = NULL;
m_clause = NULL;
m_pContainedBy = NULL;
m_offset = offset;
m_fIsRange = fIsRange;
m_fIsRoot = false; // must set this flag explicitly
}
inline bool EHRangeTreeNode::IsRange()
{
// Please see the header file for an explanation of this assertion.
_ASSERTE(m_fIsRoot || m_clause != NULL || !m_fIsRange);
return m_fIsRange;
}
void EHRangeTreeNode::MarkAsRange()
{
m_offset = 0;
m_fIsRange = true;
m_fIsRoot = false;
}
inline bool EHRangeTreeNode::IsRoot()
{
// Please see the header file for an explanation of this assertion.
_ASSERTE(m_fIsRoot || m_clause != NULL || !m_fIsRange);
return m_fIsRoot;
}
void EHRangeTreeNode::MarkAsRoot(DWORD offset)
{
m_offset = offset;
m_fIsRange = true;
m_fIsRoot = true;
}
inline DWORD EHRangeTreeNode::GetOffset()
{
_ASSERTE(m_clause == NULL);
_ASSERTE(IsRoot() || !IsRange());
return m_offset;
}
inline DWORD EHRangeTreeNode::GetTryStart()
{
_ASSERTE(IsRange());
_ASSERTE(!IsRoot());
if (IsRoot())
{
return 0;
}
else
{
return m_clause->TryStartPC;
}
}
inline DWORD EHRangeTreeNode::GetTryEnd()
{
_ASSERTE(IsRange());
_ASSERTE(!IsRoot());
if (IsRoot())
{
return GetOffset();
}
else
{
return m_clause->TryEndPC;
}
}
inline DWORD EHRangeTreeNode::GetHandlerStart()
{
_ASSERTE(IsRange());
_ASSERTE(!IsRoot());
if (IsRoot())
{
return 0;
}
else
{
return m_clause->HandlerStartPC;
}
}
inline DWORD EHRangeTreeNode::GetHandlerEnd()
{
_ASSERTE(IsRange());
_ASSERTE(!IsRoot());
if (IsRoot())
{
return GetOffset();
}
else
{
return m_clause->HandlerEndPC;
}
}
inline DWORD EHRangeTreeNode::GetFilterStart()
{
_ASSERTE(IsRange());
_ASSERTE(!IsRoot());
if (IsRoot())
{
return 0;
}
else
{
return m_clause->FilterOffset;
}
}
// Get the end offset of the filter clause. This offset is exclusive.
inline DWORD EHRangeTreeNode::GetFilterEnd()
{
_ASSERTE(IsRange());
_ASSERTE(!IsRoot());
if (IsRoot())
{
// We should never get here if the "this" node is the root.
// By definition, the root contains everything. No checking is necessary.
return 0;
}
else
{
return m_FilterEndPC;
}
}
bool EHRangeTreeNode::Contains(DWORD offset)
{
WRAPPER_NO_CONTRACT;
EHRangeTreeNode node(offset);
return Contains(&node);
}
bool EHRangeTreeNode::TryContains(DWORD offset)
{
WRAPPER_NO_CONTRACT;
EHRangeTreeNode node(offset);
return TryContains(&node);
}
bool EHRangeTreeNode::HandlerContains(DWORD offset)
{
WRAPPER_NO_CONTRACT;
EHRangeTreeNode node(offset);
return HandlerContains(&node);
}
bool EHRangeTreeNode::FilterContains(DWORD offset)
{
WRAPPER_NO_CONTRACT;
EHRangeTreeNode node(offset);
return FilterContains(&node);
}
bool EHRangeTreeNode::Contains(EHRangeTreeNode* pNode)
{
LIMITED_METHOD_CONTRACT;
// If we are checking a range of addresses, then we should check the end address inclusively.
if (pNode->IsRoot())
{
// No node contains the root node.
return false;
}
else if (this->IsRoot())
{
return (pNode->IsRange() ?
(pNode->GetTryEnd() <= this->GetOffset()) && (pNode->GetHandlerEnd() <= this->GetOffset())
: (pNode->GetOffset() < this->GetOffset()) );
}
else
{
return (this->TryContains(pNode) || this->HandlerContains(pNode) || this->FilterContains(pNode));
}
}
bool EHRangeTreeNode::TryContains(EHRangeTreeNode* pNode)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(this->IsRange());
if (pNode->IsRoot())
{
// No node contains the root node.
return false;
}
else if (this->IsRoot())
{
// We will only get here from GetTcf() to determine if an address is in a try clause.
// In this case we want to return false.
return false;
}
else
{
DWORD tryStart = this->GetTryStart();
DWORD tryEnd = this->GetTryEnd();
// If we are checking a range of addresses, then we should check the end address inclusively.
if (pNode->IsRange())
{
DWORD start = pNode->GetTryStart();
DWORD end = pNode->GetTryEnd();
if (start == tryStart && end == tryEnd)
{
return false;
}
else if (start == end)
{
// This is effectively a single offset.
if ((tryStart <= start) && (end < tryEnd))
{
return true;
}
}
else if ((tryStart <= start) && (end <= tryEnd))
{
return true;
}
}
else
{
DWORD offset = pNode->GetOffset();
if ((tryStart <= offset) && (offset < tryEnd))
{
return true;
}
}
}
#ifdef WIN64EXCEPTIONS
// If we are boot-strapping the tree, don't recurse down because the result could be unreliable. Note that
// even if we don't recurse, given a particular node, we can still always find its most specific container with
// the logic above, i.e. it's always safe to do one depth level of checking.
//
// To build the tree, all we need to know is the most specific container of a particular node. This can be
// done by just comparing the offsets of the try regions. However, funclets create a problem because even if
// a funclet is conceptually contained in a try region, we cannot determine this fact just by comparing the offsets.
// This is when we need to recurse the tree. Here is a classic example:
// try
// {
// try
// {
// }
// catch
// {
// // If the offset is here, then we need to recurse.
// }
// }
// catch
// {
// }
if (!m_pTree->m_fInitializing)
{
// Iterate all the contained clauses, and for the ones which are contained in the try region,
// ask if the requested range is contained by it.
USHORT i = 0;
USHORT numNodes = m_containees.Count();
EHRangeTreeNode** ppNodes = NULL;
for (i = 0, ppNodes = m_containees.Table(); i < numNodes; i++, ppNodes++)
{
// This variable is purely used for readability.
EHRangeTreeNode* pNodeCur = *ppNodes;
// it's possible for nested try blocks to have the same beginning and end offsets
if ( ( this->GetTryStart() <= pNodeCur->GetTryStart() ) &&
( pNodeCur->GetTryEnd() <= this->GetTryEnd() ) )
{
if (pNodeCur->Contains(pNode))
{
return true;
}
}
}
}
#endif // WIN64EXCEPTIONS
return false;
}
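// Worked example (offsets are hypothetical): a node whose try region is [0x10, 0x50)
// TryContains a node with try region [0x20, 0x30) (properly nested) as well as a
// single offset such as 0x2C, but a node with the identical try region [0x10, 0x50)
// is not considered contained; identical try ranges never count as containment here.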
bool EHRangeTreeNode::HandlerContains(EHRangeTreeNode* pNode)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(this->IsRange());
if (pNode->IsRoot())
{
// No node contains the root node.
return false;
}
else if (this->IsRoot())
{
// We will only get here from GetTcf() to determine if an address is in a try clause.
// In this case we want to return false.
return false;
}
else
{
DWORD handlerStart = this->GetHandlerStart();
DWORD handlerEnd = this->GetHandlerEnd();
// If we are checking a range of addresses, then we should check the end address inclusively.
if (pNode->IsRange())
{
DWORD start = pNode->GetTryStart();
DWORD end = pNode->GetTryEnd();
if (start == handlerStart && end == handlerEnd)
{
return false;
}
else if ((handlerStart <= start) && (end <= handlerEnd))
{
return true;
}
}
else
{
DWORD offset = pNode->GetOffset();
if ((handlerStart <= offset) && (offset < handlerEnd))
{
return true;
}
}
}
#ifdef WIN64EXCEPTIONS
// Refer to the comment in TryContains().
if (!m_pTree->m_fInitializing)
{
// Iterate all the contained clauses, and for the ones which are contained in the handler region,
// ask if the requested range is contained by it.
USHORT i = 0;
USHORT numNodes = m_containees.Count();
EHRangeTreeNode** ppNodes = NULL;
for (i = 0, ppNodes = m_containees.Table(); i < numNodes; i++, ppNodes++)
{
// This variable is purely used for readability.
EHRangeTreeNode* pNodeCur = *ppNodes;
if ( ( this->GetHandlerStart() <= pNodeCur->GetTryStart() ) &&
( pNodeCur->GetTryEnd() < this->GetHandlerEnd() ) )
{
if (pNodeCur->Contains(pNode))
{
return true;
}
}
}
}
#endif // WIN64EXCEPTIONS
return false;
}
bool EHRangeTreeNode::FilterContains(EHRangeTreeNode* pNode)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(this->IsRange());
if (pNode->IsRoot())
{
// No node contains the root node.
return false;
}
else if (this->IsRoot() || !IsFilterHandler(this->m_clause))
{
// We will only get here from GetTcf() to determine if an address is in a try clause.
// In this case we want to return false.
return false;
}
else
{
DWORD filterStart = this->GetFilterStart();
DWORD filterEnd = this->GetFilterEnd();
// If we are checking a range of addresses, then we should check the end address inclusively.
if (pNode->IsRange())
{
DWORD start = pNode->GetTryStart();
DWORD end = pNode->GetTryEnd();
if (start == filterStart && end == filterEnd)
{
return false;
}
else if ((filterStart <= start) && (end <= filterEnd))
{
return true;
}
}
else
{
DWORD offset = pNode->GetOffset();
if ((filterStart <= offset) && (offset < filterEnd))
{
return true;
}
}
}
#ifdef WIN64EXCEPTIONS
// Refer to the comment in TryContains().
if (!m_pTree->m_fInitializing)
{
// Iterate all the contained clauses, and for the ones which are contained in the filter region,
// ask if the requested range is contained by it.
USHORT i = 0;
USHORT numNodes = m_containees.Count();
EHRangeTreeNode** ppNodes = NULL;
for (i = 0, ppNodes = m_containees.Table(); i < numNodes; i++, ppNodes++)
{
// This variable is purely used for readability.
EHRangeTreeNode* pNodeCur = *ppNodes;
if ( ( this->GetFilterStart() <= pNodeCur->GetTryStart() ) &&
( pNodeCur->GetTryEnd() < this->GetFilterEnd() ) )
{
if (pNodeCur->Contains(pNode))
{
return true;
}
}
}
}
#endif // WIN64EXCEPTIONS
return false;
}
EHRangeTreeNode* EHRangeTreeNode::GetContainer()
{
return m_pContainedBy;
}
HRESULT EHRangeTreeNode::AddNode(EHRangeTreeNode *pNode)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(return E_OUTOFMEMORY;);
PRECONDITION(pNode != NULL);
}
CONTRACTL_END;
EHRangeTreeNode **ppEH = m_containees.Append();
if (ppEH == NULL)
return E_OUTOFMEMORY;
(*ppEH) = pNode;
return S_OK;
}
// ******************************* EHRangeTree ************************** //
EHRangeTree::EHRangeTree(IJitManager* pIJM,
const METHODTOKEN& methodToken,
DWORD methodSize,
int cFunclet,
const DWORD * rgFunclet)
{
LIMITED_METHOD_CONTRACT;
LOG((LF_CORDB, LL_INFO10000, "EHRT::ERHT: already loaded!\n"));
EH_CLAUSE_ENUMERATOR pEnumState;
m_EHCount = pIJM->InitializeEHEnumeration(methodToken, &pEnumState);
_ASSERTE(m_EHCount != 0xFFFFFFFF);
ULONG i = 0;
m_rgClauses = NULL;
m_rgNodes = NULL;
m_root = NULL;
m_hrInit = S_OK;
m_fInitializing = true;
if (m_EHCount > 0)
{
m_rgClauses = new (nothrow) EE_ILEXCEPTION_CLAUSE[m_EHCount];
if (m_rgClauses == NULL)
{
m_hrInit = E_OUTOFMEMORY;
goto LError;
}
}
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: m_ehcount:0x%x, m_rgClauses:0%x\n",
m_EHCount, m_rgClauses));
m_rgNodes = new (nothrow) EHRangeTreeNode[m_EHCount+1];
if (m_rgNodes == NULL)
{
m_hrInit = E_OUTOFMEMORY;
goto LError;
}
//this contains everything, even stuff on the last IP
m_root = &(m_rgNodes[m_EHCount]);
m_root->MarkAsRoot(methodSize + 1);
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: rgNodes:0x%x\n", m_rgNodes));
if (m_EHCount ==0)
{
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: About to leave!\n"));
goto LSuccess;
}
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: Sticking around!\n"));
// First, load all the EH clauses into the object.
for (i = 0; i < m_EHCount; i++)
{
EE_ILEXCEPTION_CLAUSE * pEHClause = &(m_rgClauses[i]);
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: i:0x%x!\n", i));
pIJM->GetNextEHClause(&pEnumState, pEHClause);
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: EHRTT_JIT_MANAGER got clause\n", i));
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: clause 0x%x,"
"addrof:0x%x\n", i, pEHClause ));
_ASSERTE(pEHClause->HandlerEndPC != (DWORD) -1); // <TODO> remove, only protects against a deprecated convention</TODO>
EHRangeTreeNode * pNodeCur = &(m_rgNodes[i]);
pNodeCur->m_pTree = this;
pNodeCur->m_clause = pEHClause;
if (pEHClause->Flags == COR_ILEXCEPTION_CLAUSE_FILTER)
{
#ifdef WIN64EXCEPTIONS
// Because of funclets, there is no way to guarantee the placement of a filter.
// Thus, we need to loop through the funclets to find the end offset.
for (int f = 0; f < cFunclet; f++)
{
// Check the start offset of the filter funclet.
if (pEHClause->FilterOffset == rgFunclet[f])
{
if (f < (cFunclet - 1))
{
// If it's NOT the last funclet, use the start offset of the next funclet.
pNodeCur->m_FilterEndPC = rgFunclet[f + 1];
}
else
{
// If it's the last funclet, use the size of the method.
pNodeCur->m_FilterEndPC = methodSize;
}
break;
}
}
#else // WIN64EXCEPTIONS
// On x86, since the filter doesn't have an end FilterPC, the only way we can know the size
// of the filter is if it's located immediately prior to its handler and immediately after
// its try region. We assume that this is the case, and if it isn't, we're so amazingly hosed that
// we can't continue.
if ((pEHClause->FilterOffset >= pEHClause->HandlerStartPC) ||
(pEHClause->FilterOffset < pEHClause->TryEndPC))
{
m_hrInit = CORDBG_E_SET_IP_IMPOSSIBLE;
goto LError;
}
pNodeCur->m_FilterEndPC = pEHClause->HandlerStartPC;
#endif // WIN64EXCEPTIONS
}
pNodeCur->MarkAsRange();
}
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: about to do the second pass\n"));
// Second, for each EH clause, find its most limited containing clause
// On WIN64, we have duplicate clauses. There are two types of duplicate clauses.
//
// The first type is described in ExceptionHandling.cpp. This type doesn't add additional information to the
// EH tree structure. For example, if an offset is in the try region of a duplicate clause of this type,
// then some clause which comes before the duplicate clause should contain the offset in its handler region.
// Therefore, even though duplicate clauses of this type are added to the EH tree, they should never be used.
//
// The second type is what's called the protected clause. These clauses are used to mark the cloned finally
// region. They have an empty try region. Here's an example:
//
// // C# code
// try
// {
// A
// }
// finally
// {
// B
// }
//
// // jitted code
// parent
// -------
// A
// B'
// -------
//
// funclet
// -------
// B
// -------
//
// A protected clause covers the B' region in the parent method. In essence you can think of the method as
// having two try/finally regions, and that's exactly how protected clauses are handled in the EH tree.
// They are added to the EH tree just like any other EH clauses.
for (i = 0; i < m_EHCount; i++)
{
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: SP:0x%x\n", i));
EHRangeTreeNode * pNodeCur = &(m_rgNodes[i]);
EHRangeTreeNode *pNodeCandidate = NULL;
pNodeCandidate = FindContainer(pNodeCur);
_ASSERTE(pNodeCandidate != NULL);
pNodeCur->m_pContainedBy = pNodeCandidate;
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: SP: about to add to tree\n"));
HRESULT hr = pNodeCandidate->AddNode(pNodeCur);
if (FAILED(hr))
{
m_hrInit = hr;
goto LError;
}
}
LSuccess:
m_fInitializing = false;
return;
LError:
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: LError - something went wrong!\n"));
if (m_rgClauses != NULL)
{
delete [] m_rgClauses;
m_rgClauses = NULL;
}
if (m_rgNodes != NULL)
{
delete [] m_rgNodes;
m_rgNodes = NULL;
}
m_fInitializing = false;
LOG((LF_CORDB, LL_INFO10000, "EHRT::CC: Falling off of LError!\n"));
} // Ctor Core
EHRangeTree::~EHRangeTree()
{
LIMITED_METHOD_CONTRACT;
if (m_rgNodes != NULL)
delete [] m_rgNodes;
if (m_rgClauses != NULL)
delete [] m_rgClauses;
} //Dtor
EHRangeTreeNode *EHRangeTree::FindContainer(EHRangeTreeNode *pNodeSearch)
{
LIMITED_METHOD_CONTRACT;
EHRangeTreeNode *pNodeCandidate = NULL;
// Examine the root, too.
for (ULONG iInner = 0; iInner < m_EHCount+1; iInner++)
{
EHRangeTreeNode *pNodeCur = &(m_rgNodes[iInner]);
// Check if the current node contains the node we are searching for.
if ((pNodeSearch != pNodeCur) &&
pNodeCur->Contains(pNodeSearch))
{
// Update the candidate node if it is NULL or if it contains the current node
// (i.e. the current node is more specific than the candidate node).
if ((pNodeCandidate == NULL) ||
pNodeCandidate->Contains(pNodeCur))
{
pNodeCandidate = pNodeCur;
}
}
}
return pNodeCandidate;
}
EHRangeTreeNode *EHRangeTree::FindMostSpecificContainer(DWORD addr)
{
WRAPPER_NO_CONTRACT;
EHRangeTreeNode node(addr);
return FindContainer(&node);
}
EHRangeTreeNode *EHRangeTree::FindNextMostSpecificContainer(EHRangeTreeNode *pNodeSearch, DWORD addr)
{
WRAPPER_NO_CONTRACT;
_ASSERTE(!m_fInitializing);
EHRangeTreeNode **rgpNodes = pNodeSearch->m_containees.Table();
if (NULL == rgpNodes)
return pNodeSearch;
// It's possible that no subrange contains the desired address, so
// keep a reasonable default around.
EHRangeTreeNode *pNodeCandidate = pNodeSearch;
USHORT cSubRanges = pNodeSearch->m_containees.Count();
EHRangeTreeNode **ppNodeCur = pNodeSearch->m_containees.Table();
for (int i = 0; i < cSubRanges; i++, ppNodeCur++)
{
if ((*ppNodeCur)->Contains(addr) &&
pNodeCandidate->Contains((*ppNodeCur)))
{
pNodeCandidate = (*ppNodeCur);
}
}
return pNodeCandidate;
}
BOOL EHRangeTree::isAtStartOfCatch(DWORD offset)
{
LIMITED_METHOD_CONTRACT;
if (NULL != m_rgNodes && m_EHCount != 0)
{
for(unsigned i = 0; i < m_EHCount;i++)
{
if (m_rgNodes[i].m_clause->HandlerStartPC == offset &&
(!IsFilterHandler(m_rgNodes[i].m_clause) && !IsFaultOrFinally(m_rgNodes[i].m_clause)))
return TRUE;
}
}
return FALSE;
}
enum TRY_CATCH_FINALLY
{
TCF_NONE= 0,
TCF_TRY,
TCF_FILTER,
TCF_CATCH,
TCF_FINALLY,
TCF_COUNT, //count of all elements, not an element itself
};
#ifdef LOGGING
const char *TCFStringFromConst(TRY_CATCH_FINALLY tcf)
{
LIMITED_METHOD_CONTRACT;
switch( tcf )
{
case TCF_NONE:
return "TCF_NONE";
break;
case TCF_TRY:
return "TCF_TRY";
break;
case TCF_FILTER:
return "TCF_FILTER";
break;
case TCF_CATCH:
return "TCF_CATCH";
break;
case TCF_FINALLY:
return "TCF_FINALLY";
break;
case TCF_COUNT:
return "TCF_COUNT";
break;
default:
return "INVALID TCF VALUE";
break;
}
}
#endif //LOGGING
#ifndef WIN64EXCEPTIONS
// We're unwinding if we'll return to the EE's code. Otherwise
// we'll return to someplace in the current code. Anywhere outside
// this function is "EE code".
bool FinallyIsUnwinding(EHRangeTreeNode *pNode,
ICodeManager* pEECM,
PREGDISPLAY pReg,
SLOT addrStart)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
const BYTE *pbRetAddr = pEECM->GetFinallyReturnAddr(pReg);
if (pbRetAddr < (const BYTE *)addrStart)
return true;
DWORD offset = (DWORD)(size_t)(pbRetAddr - addrStart);
EHRangeTreeNode *pRoot = pNode->m_pTree->m_root;
if (!pRoot->Contains(offset))
return true;
else
return false;
}
BOOL LeaveCatch(ICodeManager* pEECM,
Thread *pThread,
CONTEXT *pCtx,
GCInfoToken gcInfoToken,
unsigned offset)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
// We can assert these things here, and skip a call
// to COMPlusCheckForAbort later.
// Either there is a pending exception,
_ASSERTE((pThread->GetThrowable() != NULL) ||
// or no abort has been requested.
(!pThread->IsAbortRequested()) );
LPVOID esp = COMPlusEndCatchWorker(pThread);
PopNestedExceptionRecords(esp, pCtx, pThread->GetExceptionListPtr());
// Do JIT-specific work
pEECM->LeaveCatch(gcInfoToken, offset, pCtx);
SetSP(pCtx, (UINT_PTR)esp);
return TRUE;
}
#endif // WIN64EXCEPTIONS
TRY_CATCH_FINALLY GetTcf(EHRangeTreeNode *pNode,
unsigned offset)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
_ASSERTE(pNode->IsRange() && !pNode->IsRoot());
TRY_CATCH_FINALLY tcf;
if (!pNode->Contains(offset))
{
tcf = TCF_NONE;
}
else if (pNode->TryContains(offset))
{
tcf = TCF_TRY;
}
else if (pNode->FilterContains(offset))
{
tcf = TCF_FILTER;
}
else
{
_ASSERTE(pNode->HandlerContains(offset));
if (IsFaultOrFinally(pNode->m_clause))
tcf = TCF_FINALLY;
else
tcf = TCF_CATCH;
}
return tcf;
}
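// Worked example (offsets are hypothetical): for a filter clause with try region
// [0x10, 0x20), filter starting at 0x20, and handler region [0x30, 0x40), an offset
// of 0x15 classifies as TCF_TRY, 0x25 as TCF_FILTER, and 0x35 as TCF_CATCH (a fault
// or finally handler would classify as TCF_FINALLY instead). An offset the node
// does not contain at all classifies as TCF_NONE.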
const DWORD bEnter = 0x01;
const DWORD bLeave = 0x02;
HRESULT IsLegalTransition(Thread *pThread,
bool fCanSetIPOnly,
DWORD fEnter,
EHRangeTreeNode *pNode,
DWORD offFrom,
DWORD offTo,
ICodeManager* pEECM,
PREGDISPLAY pReg,
SLOT addrStart,
GCInfoToken gcInfoToken,
PCONTEXT pCtx)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
#ifdef _DEBUG
if (fEnter & bEnter)
{
_ASSERTE(pNode->Contains(offTo));
}
if (fEnter & bLeave)
{
_ASSERTE(pNode->Contains(offFrom));
}
#endif //_DEBUG
// First, figure out where we're coming from/going to
TRY_CATCH_FINALLY tcfFrom = GetTcf(pNode,
offFrom);
TRY_CATCH_FINALLY tcfTo = GetTcf(pNode,
offTo);
LOG((LF_CORDB, LL_INFO10000, "ILT: from %s to %s\n",
TCFStringFromConst(tcfFrom),
TCFStringFromConst(tcfTo)));
// Now we'll consider, case-by-case, the various permutations that
// can arise
switch(tcfFrom)
{
case TCF_NONE:
case TCF_TRY:
{
switch(tcfTo)
{
case TCF_NONE:
case TCF_TRY:
{
return S_OK;
break;
}
case TCF_FILTER:
{
return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
break;
}
case TCF_CATCH:
{
return CORDBG_E_CANT_SET_IP_INTO_CATCH;
break;
}
case TCF_FINALLY:
{
return CORDBG_E_CANT_SET_IP_INTO_FINALLY;
break;
}
default:
break;
}
break;
}
case TCF_FILTER:
{
switch(tcfTo)
{
case TCF_NONE:
case TCF_TRY:
case TCF_CATCH:
case TCF_FINALLY:
{
return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
break;
}
case TCF_FILTER:
{
return S_OK;
break;
}
default:
break;
}
break;
}
case TCF_CATCH:
{
switch(tcfTo)
{
case TCF_NONE:
case TCF_TRY:
{
#if !defined(WIN64EXCEPTIONS)
CONTEXT *pFilterCtx = pThread->GetFilterContext();
if (pFilterCtx == NULL)
return CORDBG_E_SET_IP_IMPOSSIBLE;
if (!fCanSetIPOnly)
{
if (!LeaveCatch(pEECM,
pThread,
pFilterCtx,
gcInfoToken,
offFrom))
return E_FAIL;
}
return S_OK;
#else // WIN64EXCEPTIONS
// <NOTE>
// Setting IP out of a catch clause is not supported for WIN64EXCEPTIONS because of funclets.
// This scenario is disabled with approval from VS because it's not considered to
// be a common user scenario.
// </NOTE>
return CORDBG_E_CANT_SET_IP_OUT_OF_CATCH_ON_WIN64;
#endif // !WIN64EXCEPTIONS
break;
}
case TCF_FILTER:
{
return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
break;
}
case TCF_CATCH:
{
return S_OK;
break;
}
case TCF_FINALLY:
{
return CORDBG_E_CANT_SET_IP_INTO_FINALLY;
break;
}
default:
break;
}
break;
}
case TCF_FINALLY:
{
switch(tcfTo)
{
case TCF_NONE:
case TCF_TRY:
{
#ifndef WIN64EXCEPTIONS
if (!FinallyIsUnwinding(pNode, pEECM, pReg, addrStart))
{
CONTEXT *pFilterCtx = pThread->GetFilterContext();
if (pFilterCtx == NULL)
return CORDBG_E_SET_IP_IMPOSSIBLE;
if (!fCanSetIPOnly)
{
if (!pEECM->LeaveFinally(gcInfoToken,
offFrom,
pFilterCtx))
return E_FAIL;
}
return S_OK;
}
else
{
return CORDBG_E_CANT_SET_IP_OUT_OF_FINALLY;
}
#else // !WIN64EXCEPTIONS
// <NOTE>
// Setting IP out of a non-unwinding finally clause is not supported on WIN64EXCEPTIONS because of funclets.
// This scenario is disabled with approval from VS because it's not considered to be a common user
// scenario.
// </NOTE>
return CORDBG_E_CANT_SET_IP_OUT_OF_FINALLY_ON_WIN64;
#endif // WIN64EXCEPTIONS
break;
}
case TCF_FILTER:
{
return CORDBG_E_CANT_SETIP_INTO_OR_OUT_OF_FILTER;
break;
}
case TCF_CATCH:
{
return CORDBG_E_CANT_SET_IP_INTO_CATCH;
break;
}
case TCF_FINALLY:
{
return S_OK;
break;
}
default:
break;
}
break;
}
break;
default:
break;
}
_ASSERTE( !"IsLegalTransition: We should never reach this point!" );
return CORDBG_E_SET_IP_IMPOSSIBLE;
}
// We need this to determine what
// to do based on whether the stack in general is empty
HRESULT DestinationIsValid(void *pDjiToken,
DWORD offTo,
EHRangeTree *pEHRT)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
// We'll add a call to the DebugInterface that takes this
// & tells us if the destination is a stack empty point.
// DebuggerJitInfo *pDji = (DebuggerJitInfo *)pDjiToken;
if (pEHRT->isAtStartOfCatch(offTo))
return CORDBG_S_BAD_START_SEQUENCE_POINT;
else
return S_OK;
} // HRESULT DestinationIsValid()
// We want to keep the 'worst' HRESULT - if one has failed (..._E_...) & the
// other hasn't, take the failing one. If they've both/neither failed, then
// it doesn't matter which we take.
// Note that this macro favors retaining the first argument
#define WORST_HR(hr1,hr2) (FAILED(hr1)?hr1:hr2)
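// For example, WORST_HR(S_OK, CORDBG_E_SET_IP_IMPOSSIBLE) and
// WORST_HR(CORDBG_E_SET_IP_IMPOSSIBLE, S_OK) both yield the failing HRESULT, while
// WORST_HR(E_FAIL, E_OUTOFMEMORY) retains the first argument, E_FAIL.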
HRESULT SetIPFromSrcToDst(Thread *pThread,
SLOT addrStart, // base address of method
DWORD offFrom, // native offset
DWORD offTo, // native offset
bool fCanSetIPOnly, // if true, don't do any real work
PREGDISPLAY pReg,
PCONTEXT pCtx,
void *pDji,
EHRangeTree *pEHRT)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
INJECT_FAULT(return E_OUTOFMEMORY;);
}
CONTRACTL_END;
HRESULT hr = S_OK;
HRESULT hrReturn = S_OK;
bool fCheckOnly = true;
EECodeInfo codeInfo((TADDR)(addrStart));
ICodeManager * pEECM = codeInfo.GetCodeManager();
GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
// Do both checks here so compiler doesn't complain about skipping
// initialization b/c of goto.
if (fCanSetIPOnly && !pEECM->IsGcSafe(&codeInfo, offFrom))
{
hrReturn = WORST_HR(hrReturn, CORDBG_E_SET_IP_IMPOSSIBLE);
}
if (fCanSetIPOnly && !pEECM->IsGcSafe(&codeInfo, offTo))
{
hrReturn = WORST_HR(hrReturn, CORDBG_E_SET_IP_IMPOSSIBLE);
}
if ((hr = DestinationIsValid(pDji, offTo, pEHRT)) != S_OK
&& fCanSetIPOnly)
{
hrReturn = WORST_HR(hrReturn,hr);
}
// The basic approach is this: We'll start with the most specific (smallest)
// EHClause that contains the starting address. We'll 'back out', to larger
// and larger ranges, until we either find an EHClause that contains both
// the from and to addresses, or until we reach the root EHRangeTreeNode,
// which contains all addresses within it. At each step, we check (and, on the commit
// pass, perform) the work required by the various transitions (from inside to outside a catch, etc.).
// At that point, we do the reverse process - we go from the EHClause that
// encompasses both from and to, and narrow down to the smallest EHClause that
// encompasses the to point. We use our nifty data structure to manage
// the tree structure inherent in this process.
//
// NOTE: We do this process twice, once to check that we're not doing an
// overall illegal transition, such as ultimately setting the IP into
// a catch, which is never allowed. We do this because VS
// calls SetIP without calling CanSetIP first, so we should be able
// to return an error code and leave the stack in the same condition
// as at the start of the call; therefore we shouldn't back out of clauses
// or move into them until we're sure that can be done.
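// Illustrative walk (the clause layout is hypothetical): suppose offFrom sits inside
// a catch handler nested within an outer try, and offTo sits in the method body
// outside both. The first loop below asks IsLegalTransition(bLeave) for the catch's
// node and then for the outer try's node, until it reaches a node (here, the root)
// that contains offTo. The second loop then narrows back down toward offTo, asking
// IsLegalTransition(bEnter) for each more specific node it enters. Only if the caller
// actually wants to move the IP (fCanSetIPOnly is false) and every query succeeded
// on the checking pass is the walk repeated with fCheckOnly == false to do the real
// work (e.g. LeaveCatch / LeaveFinally).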
retryForCommit:
EHRangeTreeNode *node;
EHRangeTreeNode *nodeNext;
node = pEHRT->FindMostSpecificContainer(offFrom);
while (!node->Contains(offTo))
{
hr = IsLegalTransition(pThread,
fCheckOnly,
bLeave,
node,
offFrom,
offTo,
pEECM,
pReg,
addrStart,
gcInfoToken,
pCtx);
if (FAILED(hr))
{
hrReturn = WORST_HR(hrReturn,hr);
}
node = node->GetContainer();
// m_root prevents node from ever being NULL.
}
if (node != pEHRT->m_root)
{
hr = IsLegalTransition(pThread,
fCheckOnly,
bEnter|bLeave,
node,
offFrom,
offTo,
pEECM,
pReg,
addrStart,
gcInfoToken,
pCtx);
if (FAILED(hr))
{
hrReturn = WORST_HR(hrReturn,hr);
}
}
nodeNext = pEHRT->FindNextMostSpecificContainer(node,
offTo);
while(nodeNext != node)
{
hr = IsLegalTransition(pThread,
fCheckOnly,
bEnter,
nodeNext,
offFrom,
offTo,
pEECM,
pReg,
addrStart,
gcInfoToken,
pCtx);
if (FAILED(hr))
{
hrReturn = WORST_HR(hrReturn, hr);
}
node = nodeNext;
nodeNext = pEHRT->FindNextMostSpecificContainer(node,
offTo);
}
// If it was the intention to actually set the IP and the above transition checks succeeded,
// then go back and do it all again but this time widen and narrow the thread's actual scope
if (!fCanSetIPOnly && fCheckOnly && SUCCEEDED(hrReturn))
{
fCheckOnly = false;
goto retryForCommit;
}
return hrReturn;
} // HRESULT SetIPFromSrcToDst()
// This function should only be called if the thread is suspended and sitting in jitted code
BOOL IsInFirstFrameOfHandler(Thread *pThread, IJitManager *pJitManager, const METHODTOKEN& MethodToken, DWORD offset)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
}
CONTRACTL_END;
// If we don't have a throwable, we aren't processing an exception.
if (IsHandleNullUnchecked(pThread->GetThrowableAsHandle()))
return FALSE;
EH_CLAUSE_ENUMERATOR pEnumState;
unsigned EHCount = pJitManager->InitializeEHEnumeration(MethodToken, &pEnumState);
for(ULONG i=0; i < EHCount; i++)
{
EE_ILEXCEPTION_CLAUSE EHClause;
pJitManager->GetNextEHClause(&pEnumState, &EHClause);
_ASSERTE(IsValidClause(&EHClause));
if ( offset >= EHClause.HandlerStartPC && offset < EHClause.HandlerEndPC)
return TRUE;
// check if it's in the filter itself if we're not in the handler
if (IsFilterHandler(&EHClause) && offset >= EHClause.FilterOffset && offset < EHClause.HandlerStartPC)
return TRUE;
}
return FALSE;
} // BOOL IsInFirstFrameOfHandler()
#if !defined(WIN64EXCEPTIONS)
//******************************************************************************
// LookForHandler -- search for a function that will handle the exception.
//******************************************************************************
LFH LookForHandler( // LFH return types
const EXCEPTION_POINTERS *pExceptionPointers, // The ExceptionRecord and ExceptionContext
Thread *pThread, // Thread on which to look (always current?)
ThrowCallbackType *tct) // Structure to pass back to callback functions.
{
// We don't want to use a runtime contract here since this codepath is used during
// the processing of a hard SO. Contracts use a significant amount of stack
// which we can't afford for those cases.
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
// go through to find if anyone handles the exception
StackWalkAction action = pThread->StackWalkFrames((PSTACKWALKFRAMESCALLBACK)COMPlusThrowCallback,
tct,
0, //can't use FUNCTIONSONLY because the callback uses non-function frames to stop the walk
tct->pBottomFrame);
// If someone handles it, the action will be SWA_ABORT with pFunc and dHandler indicating the
// function and handler that is handling the exception. Debugger can put a hook in here.
if (action == SWA_ABORT && tct->pFunc != NULL)
return LFH_FOUND;
// nobody is handling it
return LFH_NOT_FOUND;
} // LFH LookForHandler()
StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData);
//******************************************************************************
// UnwindFrames
//******************************************************************************
void UnwindFrames( // No return value.
Thread *pThread, // Thread to unwind.
ThrowCallbackType *tct) // Structure to pass back to callback function.
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_COOPERATIVE;
if (pThread->IsExceptionInProgress())
{
pThread->GetExceptionState()->GetFlags()->SetUnwindHasStarted();
}
#ifdef DEBUGGING_SUPPORTED
//
// If a debugger is attached, notify it that unwinding is going on.
//
if (CORDebuggerAttached())
{
g_pDebugInterface->ManagedExceptionUnwindBegin(pThread);
}
#endif // DEBUGGING_SUPPORTED
LOG((LF_EH, LL_INFO1000, "UnwindFrames: going to: pFunc:%#X, pStack:%#X\n",
tct->pFunc, tct->pStack));
pThread->StackWalkFrames((PSTACKWALKFRAMESCALLBACK)COMPlusUnwindCallback,
tct,
POPFRAMES,
tct->pBottomFrame);
} // void UnwindFrames()
#endif // !defined(WIN64EXCEPTIONS)
void StackTraceInfo::SaveStackTrace(BOOL bAllowAllocMem, OBJECTHANDLE hThrowable, BOOL bReplaceStack, BOOL bSkipLastElement)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// Do not save stacktrace to preallocated exception. These are shared.
if (CLRException::IsPreallocatedExceptionHandle(hThrowable))
{
// Preallocated exceptions will never have this flag set. However, it's possible
// that after this flag is set for a regular exception but before we throw, we have an async
// exception like a RudeThreadAbort, which will replace the exception
// containing the restored stack trace.
//
// In such a case, we should clear the flag as the throwable representing the
// preallocated exception will not have the restored (or any) stack trace.
PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState();
pCurTES->ResetRaisingForeignException();
return;
}
LOG((LF_EH, LL_INFO1000, "StackTraceInfo::SaveStackTrace (%p), alloc = %d, replace = %d, skiplast = %d\n", this, bAllowAllocMem, bReplaceStack, bSkipLastElement));
// If bSkipLastElement is specified, we must also be keeping (not replacing) the stack.
_ASSERTE(! bSkipLastElement || ! bReplaceStack);
bool fSuccess = false;
MethodTable* pMT = ObjectFromHandle(hThrowable)->GetTrueMethodTable();
// Check whether the flag indicating a foreign exception raise has been set up,
// and then reset it so that subsequent processing of managed frames proceeds
// normally.
PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState();
BOOL fRaisingForeignException = pCurTES->IsRaisingForeignException();
pCurTES->ResetRaisingForeignException();
if (bAllowAllocMem && m_dFrameCount != 0)
{
EX_TRY
{
// Only save stack trace info on exceptions
_ASSERTE(IsException(pMT)); // what is the pathway here?
if (!IsException(pMT))
{
fSuccess = true;
}
else
{
// If the stack trace contains DynamicMethodDescs, we need to save the corresponding
// System.Resolver objects in the Exception._dynamicMethods field. Failing to do that
// will cause an AV in the runtime when we try to visit those MethodDescs in the
// Exception._stackTrace field, because they have been recycled or destroyed.
unsigned iNumDynamics = 0;
// How many DynamicMethodDescs do we need to keep alive?
for (unsigned iElement=0; iElement < m_dFrameCount; iElement++)
{
MethodDesc *pMethod = m_pStackTrace[iElement].pFunc;
_ASSERTE(pMethod);
if (pMethod->IsLCGMethod())
{
// Increment the number of new dynamic methods we have found
iNumDynamics++;
}
else
if (pMethod->GetMethodTable()->Collectible())
{
iNumDynamics++;
}
}
struct _gc
{
StackTraceArray stackTrace;
StackTraceArray stackTraceTemp;
PTRARRAYREF dynamicMethodsArrayTemp;
PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
PTRARRAYREF pOrigDynamicArray;
_gc()
: stackTrace()
, stackTraceTemp()
, dynamicMethodsArrayTemp(static_cast<PTRArray *>(NULL))
, dynamicMethodsArray(static_cast<PTRArray *>(NULL))
, pOrigDynamicArray(static_cast<PTRArray *>(NULL))
{}
};
_gc gc;
GCPROTECT_BEGIN(gc);
// If the flag indicating a foreign exception raise has been set up, then check
// whether the exception object has a stack trace or not. If we get an async non-preallocated
// exception after setting this flag but before we throw, then the new
// exception will not have any stack trace set and thus, we should behave as if
// the flag was not set up.
if (fRaisingForeignException)
{
// Get the reference to stack trace and reset our flag if applicable.
((EXCEPTIONREF)ObjectFromHandle(hThrowable))->GetStackTrace(gc.stackTraceTemp);
if (gc.stackTraceTemp.Size() == 0)
{
fRaisingForeignException = FALSE;
}
}
// Replace stack (i.e. build a new stack trace) only if we are not raising a foreign exception.
// If we are, then we will continue to extend the existing stack trace.
if (bReplaceStack
&& (!fRaisingForeignException)
)
{
// Cleanup previous info
gc.stackTrace.Append(m_pStackTrace, m_pStackTrace + m_dFrameCount);
if (iNumDynamics)
{
// Adjust the allocation size of the array, if required
if (iNumDynamics > m_cDynamicMethodItems)
{
S_UINT32 cNewSize = S_UINT32(2) * S_UINT32(iNumDynamics);
if (cNewSize.IsOverflow())
{
// Overflow here implies we cannot allocate memory anymore
LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - Cannot calculate initial resolver array size due to overflow!\n"));
COMPlusThrowOM();
}
m_cDynamicMethodItems = cNewSize.Value();
}
gc.dynamicMethodsArray = (PTRARRAYREF)AllocateObjectArray(m_cDynamicMethodItems, g_pObjectClass);
LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - allocated dynamic array for first frame of size %lu\n",
m_cDynamicMethodItems));
}
m_dCurrentDynamicIndex = 0;
}
else
{
// Fetch the stacktrace and the dynamic method array
((EXCEPTIONREF)ObjectFromHandle(hThrowable))->GetStackTrace(gc.stackTrace, &gc.pOrigDynamicArray);
if (fRaisingForeignException)
{
// Just before we append to the stack trace, mark the last recorded frame to be from
// the foreign thread so that we can insert an annotation indicating so when building
// the stack trace string.
size_t numCurrentFrames = gc.stackTrace.Size();
if (numCurrentFrames > 0)
{
// "numCurrentFrames" can be zero if the user created an EDI using
// an unthrown exception.
StackTraceElement & refLastElementFromForeignStackTrace = gc.stackTrace[numCurrentFrames - 1];
refLastElementFromForeignStackTrace.fIsLastFrameFromForeignStackTrace = TRUE;
}
}
if (!bSkipLastElement)
gc.stackTrace.Append(m_pStackTrace, m_pStackTrace + m_dFrameCount);
//////////////////////////////
unsigned cOrigDynamic = 0; // number of objects in the old array
if (gc.pOrigDynamicArray != NULL)
{
cOrigDynamic = gc.pOrigDynamicArray->GetNumComponents();
}
else
{
// Since there is no dynamic method array, reset the corresponding state variables
m_dCurrentDynamicIndex = 0;
m_cDynamicMethodItems = 0;
}
if ((gc.pOrigDynamicArray != NULL)
|| (fRaisingForeignException)
)
{
// Since we have just restored the dynamic method array as well,
// calculate the dynamic array index which would be the total
// number of dynamic methods present in the stack trace.
//
// In addition to the ForeignException scenario, we need to reset these
// values in case the exception object in question is being thrown by
// multiple threads in parallel and thus, could have potentially different
// dynamic method array contents/size as opposed to the current state of
// StackTraceInfo.
unsigned iStackTraceElements = (unsigned)gc.stackTrace.Size();
m_dCurrentDynamicIndex = 0;
for (unsigned iIndex = 0; iIndex < iStackTraceElements; iIndex++)
{
MethodDesc *pMethod = gc.stackTrace[iIndex].pFunc;
if (pMethod)
{
if ((pMethod->IsLCGMethod()) || (pMethod->GetMethodTable()->Collectible()))
{
// Increment the number of new dynamic methods we have found
m_dCurrentDynamicIndex++;
}
}
}
// Total number of elements in the dynamic method array should also be
// reset based upon the restored array size.
m_cDynamicMethodItems = cOrigDynamic;
}
// Make the dynamic Array field reference the original array we got from the
// Exception object. If, below, we have to add new entries, we will add it to the
// array if it is allocated, or else, we will allocate it before doing so.
gc.dynamicMethodsArray = gc.pOrigDynamicArray;
// Create an object array if we have new dynamic method entries AND
// we are at (or went past) the current size limit
if (iNumDynamics > 0)
{
// Reallocate the array if we are at (or went past) the current size limit
unsigned cTotalDynamicMethodCount = m_dCurrentDynamicIndex;
S_UINT32 cNewSum = S_UINT32(cTotalDynamicMethodCount) + S_UINT32(iNumDynamics);
if (cNewSum.IsOverflow())
{
// If the current size is already the UINT32 max size, then we
// cannot go further. Overflow here implies we cannot allocate memory anymore.
LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - Cannot calculate resolver array size due to overflow!\n"));
COMPlusThrowOM();
}
cTotalDynamicMethodCount = cNewSum.Value();
if (cTotalDynamicMethodCount > m_cDynamicMethodItems)
{
// Double the current limit of the array.
S_UINT32 cNewSize = S_UINT32(2) * S_UINT32(cTotalDynamicMethodCount);
if (cNewSize.IsOverflow())
{
// Overflow here implies that we cannot allocate any more memory
LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - Cannot resize resolver array beyond max size due to overflow!\n"));
COMPlusThrowOM();
}
m_cDynamicMethodItems = cNewSize.Value();
gc.dynamicMethodsArray = (PTRARRAYREF)AllocateObjectArray(m_cDynamicMethodItems,
g_pObjectClass);
_ASSERTE(!(cOrigDynamic && !gc.pOrigDynamicArray));
LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - resized dynamic array to size %lu\n",
m_cDynamicMethodItems));
// Copy previous entries if there are any, and leave m_dCurrentDynamicIndex pointing
// to the following free index.
if (cOrigDynamic && (gc.pOrigDynamicArray != NULL))
{
memmoveGCRefs(gc.dynamicMethodsArray->GetDataPtr(),
gc.pOrigDynamicArray->GetDataPtr(),
cOrigDynamic * sizeof(Object *));
// m_dCurrentDynamicIndex is already referring to the correct index
// at which the next resolver object will be saved
}
}
else
{
// We are adding objects to the existing array.
//
// We have new dynamic method entries for which
// resolver objects need to be saved. Ensure
// that we have the array to store them
if (gc.dynamicMethodsArray == NULL)
{
_ASSERTE(m_cDynamicMethodItems > 0);
gc.dynamicMethodsArray = (PTRARRAYREF)AllocateObjectArray(m_cDynamicMethodItems,
g_pObjectClass);
m_dCurrentDynamicIndex = 0;
LOG((LF_EH, LL_INFO100, "StackTraceInfo::SaveStackTrace - allocated dynamic array of size %lu\n",
m_cDynamicMethodItems));
}
else
{
// The array already exists for storing resolver objects, and
// m_dCurrentDynamicIndex already refers to the index at which the
// next resolver will be stored.
}
}
}
}
// Update _dynamicMethods field
if (iNumDynamics)
{
// At this point, we should have a valid array for storage
_ASSERTE(gc.dynamicMethodsArray != NULL);
// Assert that we are in valid range of the array in which resolver objects will be saved.
// We subtract 1 below since storage will start from m_dCurrentDynamicIndex onwards and not
// from (m_dCurrentDynamicIndex + 1).
_ASSERTE((m_dCurrentDynamicIndex + iNumDynamics - 1) < gc.dynamicMethodsArray->GetNumComponents());
for (unsigned i=0; i < m_dFrameCount; i++)
{
MethodDesc *pMethod = m_pStackTrace[i].pFunc;
_ASSERTE(pMethod);
if (pMethod->IsLCGMethod())
{
// We need to append the corresponding System.Resolver for
// this DynamicMethodDesc to keep it alive.
DynamicMethodDesc *pDMD = (DynamicMethodDesc *) pMethod;
OBJECTREF pResolver = pDMD->GetLCGMethodResolver()->GetManagedResolver();
_ASSERTE(pResolver != NULL);
// Store Resolver information in the array
gc.dynamicMethodsArray->SetAt(m_dCurrentDynamicIndex++, pResolver);
}
else
if (pMethod->GetMethodTable()->Collectible())
{
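// For methods on collectible types, keep the LoaderAllocator's exposed managed
// object alive instead; it in turn keeps the collectible type (and this MethodDesc)
// from being collected.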
OBJECTREF pLoaderAllocator = pMethod->GetMethodTable()->GetLoaderAllocator()->GetExposedObject();
_ASSERTE(pLoaderAllocator != NULL);
gc.dynamicMethodsArray->SetAt (m_dCurrentDynamicIndex++, pLoaderAllocator);
}
}
}
((EXCEPTIONREF)ObjectFromHandle(hThrowable))->SetStackTrace(gc.stackTrace, gc.dynamicMethodsArray);
// Update _stackTraceString field.
((EXCEPTIONREF)ObjectFromHandle(hThrowable))->SetStackTraceString(NULL);
fSuccess = true;
GCPROTECT_END(); // gc
}
}
EX_CATCH
{
}
EX_END_CATCH(SwallowAllExceptions)
}
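// ClearStackTrace only resets the in-flight frame count; the allocated trace buffer is kept for reuse.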
ClearStackTrace();
if (!fSuccess)
{
EX_TRY
{
_ASSERTE(IsException(pMT)); // what is the pathway here?
if (bReplaceStack && IsException(pMT))
((EXCEPTIONREF)ObjectFromHandle(hThrowable))->ClearStackTraceForThrow();
}
EX_CATCH
{
// Do nothing
}
EX_END_CATCH(SwallowAllExceptions);
}
}
// Copy a context record, being careful about whether or not the target
// is large enough to support CONTEXT_EXTENDED_REGISTERS.
//
// NOTE: this function can ONLY be used when a filter function will return
// EXCEPTION_CONTINUE_EXECUTION. On AMD64, replacing the CONTEXT in any other
// situation may break exception unwinding.
//
// NOTE: this function MUST be used on AMD64. During exception handling,
// parts of the CONTEXT struct must not be modified.
// High 2 bytes are machine type. Low 2 bytes are register subset.
#define CONTEXT_EXTENDED_BIT (CONTEXT_EXTENDED_REGISTERS & 0xffff)
VOID
ReplaceExceptionContextRecord(CONTEXT *pTarget, CONTEXT *pSource)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pTarget);
_ASSERTE(pSource);
#if defined(_TARGET_X86_)
//<TODO>
// @TODO IA64: CONTEXT_DEBUG_REGISTERS not defined on IA64, may need updated SDK
//</TODO>
// Want CONTROL, INTEGER, SEGMENTS. If we have Floating Point, fine.
_ASSERTE((pSource->ContextFlags & CONTEXT_FULL) == CONTEXT_FULL);
#endif // _TARGET_X86_
#ifdef CONTEXT_EXTENDED_REGISTERS
if (pSource->ContextFlags & CONTEXT_EXTENDED_BIT)
{
if (pTarget->ContextFlags & CONTEXT_EXTENDED_BIT)
{ // Source and Target have EXTENDED bit set.
*pTarget = *pSource;
}
else
{ // Source has but Target doesn't have EXTENDED bit set. (Target is shorter than Source.)
// Copy non-extended part of the struct, and reset the bit on the Target, as it was.
memcpy(pTarget, pSource, offsetof(CONTEXT, ExtendedRegisters));
pTarget->ContextFlags &= ~CONTEXT_EXTENDED_BIT; // Target was short. Reset the extended bit.
}
}
else
{ // Source does not have EXTENDED bit. Copy only non-extended part of the struct.
memcpy(pTarget, pSource, offsetof(CONTEXT, ExtendedRegisters));
}
STRESS_LOG3(LF_SYNC, LL_INFO1000, "ReSet thread context EIP = %p ESP = %p EBP = %p\n",
GetIP((CONTEXT*)pTarget), GetSP((CONTEXT*)pTarget), GetFP((CONTEXT*)pTarget));
#else // !CONTEXT_EXTENDED_REGISTERS
// Everything that's left
*pTarget = *pSource;
#endif // !CONTEXT_EXTENDED_REGISTERS
}
VOID FixupOnRethrow(Thread* pCurThread, EXCEPTION_POINTERS* pExceptionPointers)
{
WRAPPER_NO_CONTRACT;
ThreadExceptionState* pExState = pCurThread->GetExceptionState();
#ifdef FEATURE_INTERPRETER
// Abort if we don't have any state from the original exception.
if (!pExState->IsExceptionInProgress())
{
return;
}
#endif // FEATURE_INTERPRETER
// Don't allow rethrow of a STATUS_STACK_OVERFLOW -- it's a new throw of the COM+ exception.
if (pExState->GetExceptionCode() == STATUS_STACK_OVERFLOW)
{
return;
}
// For COMPLUS exceptions, we don't need the original context for our rethrow.
if (!(pExState->IsComPlusException()))
{
_ASSERTE(pExState->GetExceptionRecord());
// Don't copy the parameter args, as we have already supplied them on the throw.
memcpy((void*)pExceptionPointers->ExceptionRecord,
(void*)pExState->GetExceptionRecord(),
offsetof(EXCEPTION_RECORD, ExceptionInformation));
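// (Copying offsetof(EXCEPTION_RECORD, ExceptionInformation) bytes copies every field
// up to, but not including, the ExceptionInformation array.)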
// Replacing the exception context breaks unwinding on AMD64. It also breaks exception dispatch on IA64.
// The info saved by pExState will be given to exception filters.
#ifndef WIN64EXCEPTIONS
// Restore original context if available.
if (pExState->GetContextRecord())
{
ReplaceExceptionContextRecord(pExceptionPointers->ContextRecord,
pExState->GetContextRecord());
}
#endif // !WIN64EXCEPTIONS
}
pExState->GetFlags()->SetIsRethrown();
}
struct RaiseExceptionFilterParam
{
BOOL isRethrown;
};
LONG RaiseExceptionFilter(EXCEPTION_POINTERS* ep, LPVOID pv)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
RaiseExceptionFilterParam *pParam = (RaiseExceptionFilterParam *) pv;
if (1 == pParam->isRethrown)
{
// need to reset the EH info back to the original thrown exception
FixupOnRethrow(GetThread(), ep);
#ifdef WIN64EXCEPTIONS
// only do this once
pParam->isRethrown++;
#endif // WIN64EXCEPTIONS
}
else
{
CONSISTENCY_CHECK((2 == pParam->isRethrown) || (0 == pParam->isRethrown));
}
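// This filter never handles the exception itself; it only fixes up rethrow state, so keep searching.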
return EXCEPTION_CONTINUE_SEARCH;
}
//==========================================================================
// Throw an object.
//==========================================================================
VOID DECLSPEC_NORETURN RaiseTheException(OBJECTREF throwable, BOOL rethrow
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, CorruptionSeverity severity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
LOG((LF_EH, LL_INFO100, "RealCOMPlusThrow throwing %s\n",
throwable->GetTrueMethodTable()->GetDebugClassName()));
if (throwable == NULL)
{
_ASSERTE(!"RealCOMPlusThrow(OBJECTREF) called with NULL argument. Somebody forgot to post an exception!");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
if (g_CLRPolicyRequested &&
throwable->GetMethodTable() == g_pOutOfMemoryExceptionClass)
{
// We depend on UNINSTALL_UNWIND_AND_CONTINUE_HANDLER to handle out-of-memory escalation.
// We should throw a C++ exception instead.
ThrowOutOfMemory();
}
#ifdef FEATURE_STACK_PROBE
else if (throwable == CLRException::GetPreallocatedStackOverflowException())
{
ThrowStackOverflow();
}
#else
_ASSERTE(throwable != CLRException::GetPreallocatedStackOverflowException());
#endif
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
if (!g_pConfig->LegacyCorruptedStateExceptionsPolicy())
{
// This is Scenario 3 described in clrex.h around the definition of SET_CE_RETHROW_FLAG_FOR_EX_CATCH macro.
//
// We are here because the VM is attempting to throw a managed exception. It is possible this exception
// may not be seen by the CLR's exception handler for managed code (e.g. there may be an EX_CATCH up the stack
// that will swallow or rethrow this exception). In the following scenario:
//
// [VM1 - RethrowCSE] -> [VM2 - RethrowCSE] -> [VM3 - RethrowCSE] -> <managed code>
//
// When managed code throws a CSE (e.g. TargetInvocationException flagged as CSE), [VM3] will rethrow it and we will
// enter EX_CATCH in VM2 which is supposed to rethrow it as well. Two things can happen:
//
// 1) The implementation of EX_CATCH in VM2 throws a new managed exception *before* rethrow policy is applied and control
// will reach EX_CATCH in VM1, OR
//
// 2) EX_CATCH in VM2 swallows the exception, comes out of the catch block and later throws a new managed exception that
// will be caught by EX_CATCH in VM1.
//
// In either of the cases, rethrow in VM1 should be on the basis of the new managed exception's corruption severity.
//
// To support this scenario, we set the corruption severity of the managed exception the VM is throwing. If it's a rethrow,
// it implies we are rethrowing the last exception that was seen by CLR's managed code exception handler. In such a case,
// we will copy over the corruption severity of that exception.
// If throwable indicates corrupted state, forcibly set the severity.
if (CEHelper::IsProcessCorruptedStateException(throwable))
{
severity = ProcessCorrupting;
}
// No one should have passed us an invalid severity.
_ASSERTE(severity > NotSet);
if (severity == NotSet)
{
severity = NotCorrupting;
}
// Update the corruption severity of the exception being thrown by the VM.
GetThread()->GetExceptionState()->SetLastActiveExceptionCorruptionSeverity(severity);
// Exception's corruption severity should be reused in reraise if this exception leaks out from the VM
// into managed code
CEHelper::MarkLastActiveExceptionCorruptionSeverityForReraiseReuse();
LOG((LF_EH, LL_INFO100, "RaiseTheException - Set VM thrown managed exception severity to %d.\n", severity));
}
#endif // FEATURE_CORRUPTING_EXCEPTIONS
RaiseTheExceptionInternalOnly(throwable,rethrow);
}
HRESULT GetHRFromThrowable(OBJECTREF throwable)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
HRESULT hr = E_FAIL;
MethodTable *pMT = throwable->GetTrueMethodTable();
// Only Exception objects have a HResult field
// So don't fetch the field unless we have an exception
_ASSERTE(IsException(pMT)); // what is the pathway here?
if (IsException(pMT))
{
hr = ((EXCEPTIONREF)throwable)->GetHResult();
}
return hr;
}
VOID DECLSPEC_NORETURN RaiseTheExceptionInternalOnly(OBJECTREF throwable, BOOL rethrow, BOOL fForStackOverflow)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
STRESS_LOG3(LF_EH, LL_INFO100, "******* MANAGED EXCEPTION THROWN: Object thrown: %p MT %pT rethrow %d\n",
OBJECTREFToObject(throwable), (throwable!=0)?throwable->GetMethodTable():0, rethrow);
#ifdef STRESS_LOG
// Any object could have been thrown, but System.Exception objects have useful information for the stress log
if (!NingenEnabled() && throwable == CLRException::GetPreallocatedStackOverflowException())
{
// If we are handling an SO, don't try to get all that other goop. It isn't there anyway,
// and it could cause us to take another SO.
STRESS_LOG1(LF_EH, LL_INFO100, "Exception HRESULT = 0x%x \n", COR_E_STACKOVERFLOW);
}
else if (throwable != 0)
{
_ASSERTE(IsException(throwable->GetMethodTable()));
int hr = ((EXCEPTIONREF)throwable)->GetHResult();
STRINGREF message = ((EXCEPTIONREF)throwable)->GetMessage();
OBJECTREF innerEH = ((EXCEPTIONREF)throwable)->GetInnerException();
STRESS_LOG4(LF_EH, LL_INFO100, "Exception HRESULT = 0x%x Message String 0x%p (db will display) InnerException %p MT %pT\n",
hr, OBJECTREFToObject(message), OBJECTREFToObject(innerEH), (innerEH!=0)?innerEH->GetMethodTable():0);
}
#endif
struct Param : RaiseExceptionFilterParam
{
OBJECTREF throwable;
BOOL fForStackOverflow;
ULONG_PTR exceptionArgs[INSTANCE_TAGGED_SEH_PARAM_ARRAY_SIZE];
Thread *pThread;
ThreadExceptionState* pExState;
} param;
param.isRethrown = rethrow ? 1 : 0; // normalize because we use it as a count in RaiseExceptionFilter
param.throwable = throwable;
param.fForStackOverflow = fForStackOverflow;
param.pThread = GetThread();
_ASSERTE(param.pThread);
param.pExState = param.pThread->GetExceptionState();
if (param.pThread->IsRudeAbortInitiated())
{
// Nobody should be able to swallow rude thread abort.
param.throwable = CLRException::GetPreallocatedRudeThreadAbortException();
}
#if 0
// TODO: enable this after we change RealCOMPlusThrow
#ifdef _DEBUG
// If ThreadAbort exception is thrown, the thread should be marked with AbortRequest.
// If not, we may see unhandled exception.
if (param.throwable->GetTrueMethodTable() == g_pThreadAbortExceptionClass)
{
_ASSERTE(GetThread()->IsAbortRequested()
#ifdef _TARGET_X86_
||
GetFirstCOMPlusSEHRecord(this) == EXCEPTION_CHAIN_END
#endif
);
}
#endif
#endif
// raise
PAL_TRY(Param *, pParam, &param)
{
//_ASSERTE(! pParam->isRethrown || pParam->pExState->m_pExceptionRecord);
ULONG_PTR *args = NULL;
ULONG argCount = 0;
ULONG flags = 0;
ULONG code = 0;
// Always save the current object in the handle so on rethrow we can reuse it. This is important as it
// contains stack trace info.
//
// Note: we use SafeSetLastThrownObject, which will try to set the throwable and if there are any problems,
// it will set the throwable to something appropriate (like an OOM exception) and return the new
// exception. Thus, the user's exception object can be replaced here.
pParam->throwable = NingenEnabled() ? NULL : pParam->pThread->SafeSetLastThrownObject(pParam->throwable);
if (!pParam->isRethrown ||
#ifdef FEATURE_INTERPRETER
!pParam->pExState->IsExceptionInProgress() ||
#endif // FEATURE_INTERPRETER
pParam->pExState->IsComPlusException() ||
(pParam->pExState->GetExceptionCode() == STATUS_STACK_OVERFLOW))
{
ULONG_PTR hr = NingenEnabled() ? E_FAIL : GetHRFromThrowable(pParam->throwable);
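// MarkAsThrownByUs fills the exceptionArgs array (using the HRESULT) and returns
// the argument count for the RaiseException call below.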
args = pParam->exceptionArgs;
argCount = MarkAsThrownByUs(args, hr);
flags = EXCEPTION_NONCONTINUABLE;
code = EXCEPTION_COMPLUS;
}
else
{
// Exception code should be consistent.
_ASSERTE((DWORD)(pParam->pExState->GetExceptionRecord()->ExceptionCode) == pParam->pExState->GetExceptionCode());
args = pParam->pExState->GetExceptionRecord()->ExceptionInformation;
argCount = pParam->pExState->GetExceptionRecord()->NumberParameters;
flags = pParam->pExState->GetExceptionRecord()->ExceptionFlags;
code = pParam->pExState->GetExceptionRecord()->ExceptionCode;
}
if (pParam->pThread->IsAbortInitiated () && IsExceptionOfType(kThreadAbortException,&pParam->throwable))
{
pParam->pThread->ResetPreparingAbort();
if (pParam->pThread->GetFrame() == FRAME_TOP)
{
// There is no more managed code on stack.
pParam->pThread->EEResetAbort(Thread::TAR_ALL);
}
}
// Can't access the exception object while we are in preemptive mode, so find out beforehand
// whether it's an SO.
BOOL fIsStackOverflow = IsExceptionOfType(kStackOverflowException, &pParam->throwable);
if (fIsStackOverflow || pParam->fForStackOverflow)
{
// Don't probe if we're already handling an SO. Just throw the exception.
RaiseException(code, flags, argCount, args);
}
// Probe for sufficient stack.
PUSH_STACK_PROBE_FOR_THROW(pParam->pThread);
#ifndef STACK_GUARDS_DEBUG
// This needs to be both here and inside the handler below
// enable preemptive mode before call into OS
GCX_PREEMP_NO_DTOR();
// In non-debug, we can just raise the exception once we've probed.
RaiseException(code, flags, argCount, args);
#else
// In a debug build, we need to unwind our probe structure off the stack.
BaseStackGuard *pThrowGuard = NULL;
// Stash away the address of the guard we just pushed above in PUSH_STACK_PROBE_FOR_THROW
SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pThrowGuard);
// Add the stack guard reference to the structure below so that it can be accessed within
// PAL_TRY as well
struct ParamInner
{
ULONG code;
ULONG flags;
ULONG argCount;
ULONG_PTR *args;
BaseStackGuard *pGuard;
} param;
param.code = code;
param.flags = flags;
param.argCount = argCount;
param.args = args;
param.pGuard = pThrowGuard;
PAL_TRY(ParamInner *, pParam, &param)
{
// enable preemptive mode before call into OS
GCX_PREEMP_NO_DTOR();
RaiseException(pParam->code, pParam->flags, pParam->argCount, pParam->args);
// We never return from RaiseException, so shouldn't have to call SetNoException.
// However, in the debugger we can, and if we don't call SetNoException we get
// a short-circuit return assert.
RESET_EXCEPTION_FROM_STACK_PROBE_FOR_THROW(pParam->pGuard);
}
PAL_FINALLY
{
// pop the guard that we pushed above in PUSH_STACK_PROBE_FOR_THROW
POP_STACK_PROBE_FOR_THROW(pThrowGuard);
}
PAL_ENDTRY
#endif
}
PAL_EXCEPT_FILTER (RaiseExceptionFilter)
{
}
PAL_ENDTRY
_ASSERTE(!"Cannot continue after COM+ exception"); // Debugger can bring you here.
// For example,
// Debugger breaks in due to second chance exception (unhandled)
// User hits 'g'
// Then debugger can bring us here.
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
}
// INSTALL_COMPLUS_EXCEPTION_HANDLER has a filter, so must put the call in a separate fcn
static VOID DECLSPEC_NORETURN RealCOMPlusThrowWorker(OBJECTREF throwable, BOOL rethrow
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, CorruptionSeverity severity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
) {
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
// RaiseTheException will throw C++ OOM and SO, so that our escalation policy can kick in.
// Unfortunately, the COMPlusFrameHandler installed here will try to create a managed exception object.
// We may hit a recursion.
if (g_CLRPolicyRequested &&
throwable->GetMethodTable() == g_pOutOfMemoryExceptionClass)
{
// We depend on UNINSTALL_UNWIND_AND_CONTINUE_HANDLER to handle out-of-memory escalation.
// We should throw a C++ exception instead.
ThrowOutOfMemory();
}
#ifdef FEATURE_STACK_PROBE
else if (throwable == CLRException::GetPreallocatedStackOverflowException())
{
ThrowStackOverflow();
}
#else
_ASSERTE(throwable != CLRException::GetPreallocatedStackOverflowException());
#endif
// TODO: Do we need to install COMPlusFrameHandler here?
INSTALL_COMPLUS_EXCEPTION_HANDLER();
RaiseTheException(throwable, rethrow
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, severity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
);
UNINSTALL_COMPLUS_EXCEPTION_HANDLER();
}
VOID DECLSPEC_NORETURN RealCOMPlusThrow(OBJECTREF throwable, BOOL rethrow
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, CorruptionSeverity severity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
) {
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
GCPROTECT_BEGIN(throwable);
_ASSERTE(IsException(throwable->GetMethodTable()));
// This may look a bit odd, but there is an explanation. The rethrow boolean
// means that an actual RaiseException(EXCEPTION_COMPLUS,...) is being re-thrown,
// and that the exception context saved on the Thread object should replace
// the exception context from the upcoming RaiseException(). There is logic
// in the stack trace code to preserve MOST of the stack trace, but to drop the
// last element of the stack trace (it has to do with having the address of the rethrow
// instead of the address of the original call in the stack trace. That is
// controversial itself, but we won't get into that here.)
// However, if this is not re-raising that original exception, but rather a new
// OS exception for what may be an existing exception object, it is generally
// a good thing to preserve the stack trace.
if (!rethrow)
{
ExceptionPreserveStackTrace(throwable);
}
RealCOMPlusThrowWorker(throwable, rethrow
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, severity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
);
GCPROTECT_END();
}
VOID DECLSPEC_NORETURN RealCOMPlusThrow(OBJECTREF throwable
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, CorruptionSeverity severity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
RealCOMPlusThrow(throwable, FALSE
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, severity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
);
}
// This function finds the managed callback used to get a resource
// string (transitioning to the default domain to do so) and calls it.
// This could be a lot of work.
STRINGREF GetResourceStringFromManaged(STRINGREF key)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(key != NULL);
}
CONTRACTL_END;
struct xx {
STRINGREF key;
STRINGREF ret;
} gc;
gc.key = key;
gc.ret = NULL;
// The standard probe isn't good enough here. It's possible that we only have ~14 pages of stack
// left. By the time we transition to the default domain and start fetching this resource string,
// another 12 page probe could fail.
// This failing probe would cause us to unload the default appdomain, which would cause us
// to take down the process.
// Instead, let's probe for a lot more stack to make sure that doesn't happen.
// We need to have enough stack to survive 2 more probes... the original entrypoint back
// into mscorwks after we go into managed code, and a "large" probe that protects the GC
INTERIOR_STACK_PROBE_FOR(GetThread(), DEFAULT_ENTRY_PROBE_AMOUNT * 2);
GCPROTECT_BEGIN(gc);
MethodDescCallSite getResourceStringLocal(METHOD__ENVIRONMENT__GET_RESOURCE_STRING_LOCAL);
// Call Environment::GetResourceStringLocal(String name). Returns String value (or maybe null)
ENTER_DOMAIN_PTR(SystemDomain::System()->DefaultDomain(),ADV_DEFAULTAD);
// Don't need to GCPROTECT pArgs, since it's not used after the function call.
ARG_SLOT pArgs[1] = { ObjToArgSlot(gc.key) };
gc.ret = getResourceStringLocal.Call_RetSTRINGREF(pArgs);
END_DOMAIN_TRANSITION;
GCPROTECT_END();
END_INTERIOR_STACK_PROBE;
return gc.ret;
}
// This function does potentially a LOT of work (loading possibly 50 classes).
// The return value is an un-GC-protected string ref, or possibly NULL.
void ResMgrGetString(LPCWSTR wszResourceName, STRINGREF * ppMessage)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
_ASSERTE(ppMessage != NULL);
if (wszResourceName == NULL || *wszResourceName == W('\0'))
{
// Clear the caller's string; assign through the pointer, not to the local parameter.
*ppMessage = NULL;
return;
}
// this function never looks at name again after
// calling the helper so no need to GCPROTECT it
STRINGREF name = StringObject::NewString(wszResourceName);
if (wszResourceName != NULL)
{
STRINGREF value = GetResourceStringFromManaged(name);
_ASSERTE(value!=NULL || !"Resource string lookup failed - possible misspelling or .resources missing or out of date?");
*ppMessage = value;
}
}
// GetResourceFromDefault
// transition to the default domain and get a resource there
FCIMPL1(Object*, GetResourceFromDefault, StringObject* keyUnsafe)
{
FCALL_CONTRACT;
STRINGREF ret = NULL;
STRINGREF key = (STRINGREF)keyUnsafe;
HELPER_METHOD_FRAME_BEGIN_RET_2(ret, key);
ret = GetResourceStringFromManaged(key);
HELPER_METHOD_FRAME_END();
return OBJECTREFToObject(ret);
}
FCIMPLEND
void FreeExceptionData(ExceptionData *pedata)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
SO_TOLERANT;
}
CONTRACTL_END;
_ASSERTE(pedata != NULL);
// <TODO>@NICE: At one point, we had the comment:
// (DM) Remove this when shutdown works better.</TODO>
// This test may no longer be necessary. Remove at own peril.
Thread *pThread = GetThread();
if (!pThread)
return;
if (pedata->bstrSource)
SysFreeString(pedata->bstrSource);
if (pedata->bstrDescription)
SysFreeString(pedata->bstrDescription);
if (pedata->bstrHelpFile)
SysFreeString(pedata->bstrHelpFile);
#ifdef FEATURE_COMINTEROP
if (pedata->bstrRestrictedError)
SysFreeString(pedata->bstrRestrictedError);
if (pedata->bstrReference)
SysFreeString(pedata->bstrReference);
if (pedata->bstrCapabilitySid)
SysFreeString(pedata->bstrCapabilitySid);
if (pedata->pRestrictedErrorInfo)
{
ULONG cbRef = SafeRelease(pedata->pRestrictedErrorInfo);
LogInteropRelease(pedata->pRestrictedErrorInfo, cbRef, "IRestrictedErrorInfo");
}
#endif // FEATURE_COMINTEROP
}
void GetExceptionForHR(HRESULT hr, IErrorInfo* pErrInfo, bool fUseCOMException, OBJECTREF* pProtectedThrowable, IRestrictedErrorInfo *pResErrorInfo, BOOL bHasLangRestrictedErrInfo)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(IsProtectedByGCFrame(pProtectedThrowable));
}
CONTRACTL_END;
// Initialize
*pProtectedThrowable = NULL;
#if defined(FEATURE_COMINTEROP) && !defined(CROSSGEN_COMPILE)
if (pErrInfo != NULL)
{
// If this represents a managed object...
// ...then get the managed exception object and also check if it is a __ComObject...
if (IsManagedObject(pErrInfo))
{
GetObjectRefFromComIP(pProtectedThrowable, pErrInfo);
if ((*pProtectedThrowable) != NULL)
{
// ...if it is, then we'll just default to an exception based on the IErrorInfo.
if ((*pProtectedThrowable)->GetMethodTable()->IsComObjectType())
{
(*pProtectedThrowable) = NULL;
}
else
{
// We have created an exception. Release the IErrorInfo
ULONG cbRef = SafeRelease(pErrInfo);
LogInteropRelease(pErrInfo, cbRef, "IErrorInfo release");
return;
}
}
}
// If we got here and we don't have an exception object, we have a native IErrorInfo or
// a managed __ComObject based IErrorInfo, so we'll just create an exception based on
// the native IErrorInfo.
if ((*pProtectedThrowable) == NULL)
{
EECOMException ex(hr, pErrInfo, fUseCOMException, pResErrorInfo, bHasLangRestrictedErrInfo COMMA_INDEBUG(FALSE));
(*pProtectedThrowable) = ex.GetThrowable();
}
}
#endif // defined(FEATURE_COMINTEROP) && !defined(CROSSGEN_COMPILE)
// If we made it here and we don't have an exception object, we didn't have a valid IErrorInfo
// so we'll create an exception based solely on the hresult.
if ((*pProtectedThrowable) == NULL)
{
EEMessageException ex(hr, fUseCOMException);
(*pProtectedThrowable) = ex.GetThrowable();
}
}
void GetExceptionForHR(HRESULT hr, IErrorInfo* pErrInfo, OBJECTREF* pProtectedThrowable)
{
WRAPPER_NO_CONTRACT;
GetExceptionForHR(hr, pErrInfo, true, pProtectedThrowable);
}
void GetExceptionForHR(HRESULT hr, OBJECTREF* pProtectedThrowable)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS; // because of IErrorInfo
MODE_ANY;
}
CONTRACTL_END;
// Get an IErrorInfo if one is available.
IErrorInfo *pErrInfo = NULL;
#ifndef CROSSGEN_COMPILE
if (SafeGetErrorInfo(&pErrInfo) != S_OK)
pErrInfo = NULL;
#endif
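// If no IErrorInfo was available, GetExceptionForHR will build the exception from the HRESULT alone.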
GetExceptionForHR(hr, pErrInfo, true, pProtectedThrowable);
}
//
// Maps a Win32 fault to a COM+ Exception enumeration code
//
DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord)
{
WRAPPER_NO_CONTRACT;
switch (pExceptionRecord->ExceptionCode)
{
case STATUS_FLOAT_INEXACT_RESULT:
case STATUS_FLOAT_INVALID_OPERATION:
case STATUS_FLOAT_STACK_CHECK:
case STATUS_FLOAT_UNDERFLOW:
return (DWORD) kArithmeticException;
case STATUS_FLOAT_OVERFLOW:
case STATUS_INTEGER_OVERFLOW:
return (DWORD) kOverflowException;
case STATUS_FLOAT_DIVIDE_BY_ZERO:
case STATUS_INTEGER_DIVIDE_BY_ZERO:
return (DWORD) kDivideByZeroException;
case STATUS_FLOAT_DENORMAL_OPERAND:
return (DWORD) kFormatException;
case STATUS_ACCESS_VIOLATION:
{
// We have a config key, InsecurelyTreatAVsAsNullReference, that ensures we always translate to
// NullReferenceException instead of doing the new AV translation logic.
if ((g_pConfig != NULL) && !g_pConfig->LegacyNullReferenceExceptionPolicy())
{
#if defined(FEATURE_HIJACK) && !defined(PLATFORM_UNIX)
// If we got the exception on a redirect function it means the original exception happened in managed code:
if (Thread::IsAddrOfRedirectFunc(pExceptionRecord->ExceptionAddress))
return (DWORD) kNullReferenceException;
if (pExceptionRecord->ExceptionAddress == (LPVOID)GetEEFuncEntryPoint(THROW_CONTROL_FOR_THREAD_FUNCTION))
{
return (DWORD) kNullReferenceException;
}
#endif // FEATURE_HIJACK && !PLATFORM_UNIX
// If the IP of the AV is not in managed code, then it's an AccessViolationException.
if (!ExecutionManager::IsManagedCode((PCODE)pExceptionRecord->ExceptionAddress))
{
return (DWORD) kAccessViolationException;
}
// If the address accessed is above 64k (Windows) or the page size (PAL), then it's an AccessViolationException.
// Note: Win9x is a little different... it never gives you the proper address of the read or write that caused
// the fault. It always gives -1, so we can't use it as part of the decision... just give
// NullReferenceException instead.
if (pExceptionRecord->ExceptionInformation[1] >= NULL_AREA_SIZE)
{
return (DWORD) kAccessViolationException;
}
}
return (DWORD) kNullReferenceException;
}
case STATUS_ARRAY_BOUNDS_EXCEEDED:
return (DWORD) kIndexOutOfRangeException;
case STATUS_NO_MEMORY:
return (DWORD) kOutOfMemoryException;
case STATUS_STACK_OVERFLOW:
return (DWORD) kStackOverflowException;
#ifdef ALIGN_ACCESS
case STATUS_DATATYPE_MISALIGNMENT:
return (DWORD) kDataMisalignedException;
#endif // ALIGN_ACCESS
default:
return kSEHException;
}
}
#ifdef _DEBUG
#ifndef WIN64EXCEPTIONS
// check if anyone has written to the stack above the handler which would wipe out the EH registration
void CheckStackBarrier(EXCEPTION_REGISTRATION_RECORD *exRecord)
{
LIMITED_METHOD_CONTRACT;
if (exRecord->Handler != (PEXCEPTION_ROUTINE)COMPlusFrameHandler)
return;
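// The barrier is a block of STACK_OVERWRITE_BARRIER_VALUE DWORDs that precedes m_ExRecord
// in FrameHandlerExRecordWithBarrier; any other value means the stack above the handler was overwritten.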
DWORD *stackOverwriteBarrier = (DWORD *)((BYTE*)exRecord - offsetof(FrameHandlerExRecordWithBarrier, m_ExRecord));
for (int i =0; i < STACK_OVERWRITE_BARRIER_SIZE; i++) {
if (*(stackOverwriteBarrier+i) != STACK_OVERWRITE_BARRIER_VALUE) {
// to debug this error, you must determine who erroneously overwrote the stack
_ASSERTE(!"Fatal error: the stack has been overwritten");
}
}
}
#endif // WIN64EXCEPTIONS
#endif // _DEBUG
//-------------------------------------------------------------------------
// A marker for JIT -> EE transition when we know we're in preemptive
// gc mode. As we leave the EE, we fix a few things:
//
// - the gc state must be set back to preemptive
// - the COM+ frame chain must be rewound to what it was on entry
// - ExInfo()->m_pSearchBoundary must be adjusted
// if we popped the frame that is identified as beginning the next
// crawl.
//-------------------------------------------------------------------------
void COMPlusCooperativeTransitionHandler(Frame* pFrame)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
LOG((LF_EH, LL_INFO1000, "COMPlusCooprativeTransitionHandler unwinding\n"));
{
Thread* pThread = GetThread();
// Restore us to cooperative gc mode.
GCX_COOP();
// Pop the frame chain.
UnwindFrameChain(pThread, pFrame);
CONSISTENCY_CHECK(pFrame == pThread->GetFrame());
#ifndef WIN64EXCEPTIONS
// An exception is being thrown through here. The COM+ exception
// info keeps a pointer to a frame that is used by the next
// COM+ Exception Handler as the starting point of its crawl.
// We may have popped this marker -- in which case, we need to
// update it to the current frame.
//
ThreadExceptionState* pExState = pThread->GetExceptionState();
Frame* pSearchBoundary = NULL;
if (pThread->IsExceptionInProgress())
{
pSearchBoundary = pExState->m_currentExInfo.m_pSearchBoundary;
}
if (pSearchBoundary && pSearchBoundary < pFrame)
{
LOG((LF_EH, LL_INFO1000, "\tpExInfo->m_pSearchBoundary = %08x\n", (void*)pFrame));
pExState->m_currentExInfo.m_pSearchBoundary = pFrame;
}
#endif // WIN64EXCEPTIONS
}
// Restore us to preemptive gc mode.
GCX_PREEMP_NO_DTOR();
}
void StackTraceInfo::Init()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
SO_TOLERANT;
}
CONTRACTL_END;
LOG((LF_EH, LL_INFO10000, "StackTraceInfo::Init (%p)\n", this));
m_pStackTrace = NULL;
m_cStackTrace = 0;
m_dFrameCount = 0;
m_cDynamicMethodItems = 0;
m_dCurrentDynamicIndex = 0;
}
void StackTraceInfo::FreeStackTrace()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
SO_TOLERANT;
}
CONTRACTL_END;
if (m_pStackTrace)
{
delete [] m_pStackTrace;
m_pStackTrace = NULL;
m_cStackTrace = 0;
m_dFrameCount = 0;
m_cDynamicMethodItems = 0;
m_dCurrentDynamicIndex = 0;
}
}
BOOL StackTraceInfo::IsEmpty()
{
LIMITED_METHOD_CONTRACT;
return 0 == m_dFrameCount;
}
void StackTraceInfo::ClearStackTrace()
{
LIMITED_METHOD_CONTRACT;
LOG((LF_EH, LL_INFO1000, "StackTraceInfo::ClearStackTrace (%p)\n", this));
m_dFrameCount = 0;
}
// allocate stack trace info. As each function is found in the stack crawl, it will be added
// to this list. If the list is too small, it is reallocated.
void StackTraceInfo::AllocateStackTrace()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_FORBID_FAULT;
LOG((LF_EH, LL_INFO1000, "StackTraceInfo::AllocateStackTrace (%p)\n", this));
if (!m_pStackTrace)
{
#ifdef _DEBUG
unsigned int allocSize = 2; // make small to exercise realloc
#else
unsigned int allocSize = 30;
#endif
SCAN_IGNORE_FAULT; // A fault of new is okay here. The rest of the system is cool if we don't have enough
// memory to remember the stack as we run our first pass.
m_pStackTrace = new (nothrow) StackTraceElement[allocSize];
if (m_pStackTrace != NULL)
{
// Remember how much we allocated.
m_cStackTrace = allocSize;
m_cDynamicMethodItems = allocSize;
}
else
{
m_cStackTrace = 0;
m_cDynamicMethodItems = 0;
}
}
}
//
// Returns true if it appended the element, false otherwise.
//
BOOL StackTraceInfo::AppendElement(BOOL bAllowAllocMem, UINT_PTR currentIP, UINT_PTR currentSP, MethodDesc* pFunc, CrawlFrame* pCf)
{
CONTRACTL
{
GC_TRIGGERS;
NOTHROW;
}
CONTRACTL_END
LOG((LF_EH, LL_INFO10000, "StackTraceInfo::AppendElement (%p), IP = %p, SP = %p, %s::%s\n", this, currentIP, currentSP, pFunc ? pFunc->m_pszDebugClassName : "", pFunc ? pFunc->m_pszDebugMethodName : "" ));
BOOL bRetVal = FALSE;
if (pFunc != NULL && pFunc->IsILStub())
return FALSE;
// Save this function in the stack trace array, which we only build on the first pass. We'll try to expand the
// stack trace array if we don't have enough room. Note that we only try to expand if we're allowed to allocate
// memory (bAllowAllocMem).
if (bAllowAllocMem && (m_dFrameCount >= m_cStackTrace))
{
StackTraceElement* pTempElement = new (nothrow) StackTraceElement[m_cStackTrace*2];
if (pTempElement != NULL)
{
memcpy(pTempElement, m_pStackTrace, m_cStackTrace * sizeof(StackTraceElement));
delete [] m_pStackTrace;
m_pStackTrace = pTempElement;
m_cStackTrace *= 2;
}
}
// Add the function to the stack trace array if there's room.
if (m_dFrameCount < m_cStackTrace)
{
StackTraceElement* pStackTraceElem;
// If we get in here, we'd better have a stack trace array.
CONSISTENCY_CHECK(m_pStackTrace != NULL);
pStackTraceElem = &(m_pStackTrace[m_dFrameCount]);
pStackTraceElem->pFunc = pFunc;
pStackTraceElem->ip = currentIP;
pStackTraceElem->sp = currentSP;
// When we are building stack trace as we encounter managed frames during exception dispatch,
// then none of those frames represent a stack trace from a foreign exception (as they represent
// the current exception). Hence, set the corresponding flag to FALSE.
pStackTraceElem->fIsLastFrameFromForeignStackTrace = FALSE;
// This is a workaround to fix the generation of stack traces from exception objects so that
// they point to the line that actually generated the exception instead of the line
// following.
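// (Return addresses point at the instruction after the call, so we back the IP up by one;
// frames that faulted or already had their IP adjusted point at the right instruction.)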
if (!(pCf->HasFaulted() || pCf->IsIPadjusted()) && pStackTraceElem->ip != 0)
{
pStackTraceElem->ip -= 1;
}
++m_dFrameCount;
bRetVal = TRUE;
COUNTER_ONLY(GetPerfCounters().m_Excep.cThrowToCatchStackDepth++);
}
#ifndef FEATURE_PAL // Watson is supported on Windows only
Thread *pThread = GetThread();
_ASSERTE(pThread);
if (pThread && (currentIP != 0))
{
// Set up the Watson bucketing details for the initial throw
// callback only if we don't already have them.
ThreadExceptionState *pExState = pThread->GetExceptionState();
if (!pExState->GetFlags()->GotWatsonBucketDetails())
{
// Adjust the IP if necessary.
UINT_PTR adjustedIp = currentIP;
// This is a workaround copied from above.
if (!(pCf->HasFaulted() || pCf->IsIPadjusted()) && adjustedIp != 0)
{
adjustedIp -= 1;
}
// Setup the bucketing details for the initial throw
SetupInitialThrowBucketDetails(adjustedIp);
}
}
#endif // !FEATURE_PAL
return bRetVal;
}
void StackTraceInfo::GetLeafFrameInfo(StackTraceElement* pStackTraceElement)
{
LIMITED_METHOD_CONTRACT;
if (NULL == m_pStackTrace)
{
return;
}
_ASSERTE(NULL != pStackTraceElement);
*pStackTraceElement = m_pStackTrace[0];
}
void UnwindFrameChain(Thread* pThread, LPVOID pvLimitSP)
{
CONTRACTL
{
NOTHROW;
DISABLED(GC_TRIGGERS); // some Frames' ExceptionUnwind methods trigger :(
MODE_ANY;
SO_TOLERANT;
}
CONTRACTL_END;
// @todo - Remove this and add a hard SO probe, as we can't throw from here.
CONTRACT_VIOLATION(SOToleranceViolation);
Frame* pFrame = pThread->m_pFrame;
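// Frames are allocated on the stack, so a Frame at a lower address than pvLimitSP was
// pushed more recently and needs to be unwound.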
if (pFrame < pvLimitSP)
{
GCX_COOP_THREAD_EXISTS(pThread);
//
// call ExceptionUnwind with the Frame chain intact
//
pFrame = pThread->NotifyFrameChainOfExceptionUnwind(pFrame, pvLimitSP);
//
// now pop the frames off by trimming the Frame chain
//
pThread->SetFrame(pFrame);
}
}
BOOL IsExceptionOfType(RuntimeExceptionKind reKind, Exception *pException)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_FORBID_FAULT;
if (pException->IsType(reKind))
return TRUE;
if (pException->IsType(CLRException::GetType()))
{
// Since we're going to be holding onto the Throwable object we
// need to be in COOPERATIVE.
GCX_COOP();
OBJECTREF Throwable=((CLRException*)pException)->GetThrowable();
GCX_FORBID();
if (IsExceptionOfType(reKind, &Throwable))
return TRUE;
}
return FALSE;
}
BOOL IsExceptionOfType(RuntimeExceptionKind reKind, OBJECTREF *pThrowable)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_FORBID_FAULT;
_ASSERTE(pThrowable != NULL);
if (*pThrowable == NULL)
return FALSE;
MethodTable *pThrowableMT = (*pThrowable)->GetTrueMethodTable();
// IsExceptionOfType is supported for mscorlib exception types only
_ASSERTE(reKind <= kLastExceptionInMscorlib);
return MscorlibBinder::IsException(pThrowableMT, reKind);
}
BOOL IsAsyncThreadException(OBJECTREF *pThrowable) {
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_FORBID_FAULT;
if ( (GetThread() && GetThread()->IsRudeAbort() && GetThread()->IsRudeAbortInitiated())
||IsExceptionOfType(kThreadAbortException, pThrowable)
||IsExceptionOfType(kThreadInterruptedException, pThrowable)) {
return TRUE;
} else {
return FALSE;
}
}
BOOL IsUncatchable(OBJECTREF *pThrowable)
{
CONTRACTL {
SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
FORBID_FAULT;
} CONTRACTL_END;
_ASSERTE(pThrowable != NULL);
Thread *pThread = GetThread();
if (pThread)
{
if (pThread->IsAbortInitiated())
return TRUE;
if (OBJECTREFToObject(*pThrowable)->GetMethodTable() == g_pExecutionEngineExceptionClass)
return TRUE;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
// Corrupting exceptions are also uncatchable
if (CEHelper::IsProcessCorruptedStateException(*pThrowable))
{
return TRUE;
}
#endif //FEATURE_CORRUPTING_EXCEPTIONS
}
return FALSE;
}
BOOL IsStackOverflowException(Thread* pThread, EXCEPTION_RECORD* pExceptionRecord)
{
if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
{
return true;
}
if (IsComPlusException(pExceptionRecord) &&
pThread->IsLastThrownObjectStackOverflowException())
{
return true;
}
return false;
}
#ifdef _DEBUG
BOOL IsValidClause(EE_ILEXCEPTION_CLAUSE *EHClause)
{
LIMITED_METHOD_CONTRACT;
#if 0
DWORD valid = COR_ILEXCEPTION_CLAUSE_FILTER | COR_ILEXCEPTION_CLAUSE_FINALLY |
COR_ILEXCEPTION_CLAUSE_FAULT | COR_ILEXCEPTION_CLAUSE_CACHED_CLASS;
// <TODO>@NICE: enable this when VC stops generating a bogus 0x8000.</TODO>
if (EHClause->Flags & ~valid)
return FALSE;
#endif
if (EHClause->TryStartPC > EHClause->TryEndPC)
return FALSE;
return TRUE;
}
#endif
#ifdef DEBUGGING_SUPPORTED
LONG NotifyDebuggerLastChance(Thread *pThread,
EXCEPTION_POINTERS *pExceptionInfo,
BOOL jitAttachRequested)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
LONG retval = EXCEPTION_CONTINUE_SEARCH;
// The debugger does func-evals inside this call, which may involve nested exceptions. We need a nested exception
// handler to allow this.
INSTALL_NESTED_EXCEPTION_HANDLER(pThread->GetFrame());
EXCEPTION_POINTERS dummy;
dummy.ExceptionRecord = NULL;
dummy.ContextRecord = NULL;
if (NULL == pExceptionInfo)
{
pExceptionInfo = &dummy;
}
else if (NULL != pExceptionInfo->ExceptionRecord && NULL == pExceptionInfo->ContextRecord)
{
// In a soft stack overflow, we have an exception record but not a context record.
// Debugger::LastChanceManagedException requires that both ExceptionRecord and
// ContextRecord be valid or both be NULL.
pExceptionInfo = &dummy;
}
if (g_pDebugInterface && g_pDebugInterface->LastChanceManagedException(pExceptionInfo,
pThread,
jitAttachRequested) == ExceptionContinueExecution)
{
retval = EXCEPTION_CONTINUE_EXECUTION;
}
UNINSTALL_NESTED_EXCEPTION_HANDLER();
#ifdef DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED
EX_TRY
{
// If the debugger wants to intercept the unhandled exception, then we immediately unwind without returning.
// If there is a problem with this function unwinding here, it could be separated out; however,
// we need to be very careful. Previously we had the opposite problem, in that we notified the debugger
// of an unhandled exception and then either:
// a) never gave the debugger a chance to intercept later, or
// b) code changed more process state unaware that the debugger would be handling the exception
if ((pThread->IsExceptionInProgress()) && pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo())
{
// The debugger wants to intercept this exception. It may return in a failure case, in which case we want
// to continue thru this path.
ClrDebuggerDoUnwindAndIntercept(X86_FIRST_ARG(EXCEPTION_CHAIN_END) pExceptionInfo->ExceptionRecord);
}
}
EX_CATCH // if we fail to intercept just continue as is
{
}
EX_END_CATCH(SwallowAllExceptions);
#endif // DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED
return retval;
}
#ifndef FEATURE_PAL
//----------------------------------------------------------------------------
//
// DoReportFault - wrapper for ReportFault in FaultRep.dll, which also handles
// debugger launch synchronization if the user chooses to launch
// a debugger
//
// Arguments:
// pExceptionInfo - pointer to exception info
//
// Return Value:
// The returned EFaultRepRetVal value from ReportFault
//
// Note:
//
//----------------------------------------------------------------------------
EFaultRepRetVal DoReportFault(EXCEPTION_POINTERS * pExceptionInfo)
{
LIMITED_METHOD_CONTRACT;
HINSTANCE hmod = WszLoadLibrary(W("FaultRep.dll"</