// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// File: methodtable.cpp
//
//
//
// ============================================================================
#include "common.h"
#include "clsload.hpp"
#include "method.hpp"
#include "class.h"
#include "classcompat.h"
#include "object.h"
#include "field.h"
#include "util.hpp"
#include "excep.h"
#include "siginfo.hpp"
#include "threads.h"
#include "stublink.h"
#include "ecall.h"
#include "dllimport.h"
#include "gcdesc.h"
#include "jitinterface.h"
#include "eeconfig.h"
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
#include "gcheaputilities.h"
#include "dbginterface.h"
#include "comdelegate.h"
#include "eventtrace.h"
#include "fieldmarshaler.h"
#include "eeprofinterfaces.h"
#include "dllimportcallback.h"
#include "listlock.h"
#include "methodimpl.h"
#include "guidfromname.h"
#include "stackprobe.h"
#include "encee.h"
#include "encee.h"
#include "comsynchronizable.h"
#include "customattribute.h"
#include "virtualcallstub.h"
#include "contractimpl.h"
#ifdef FEATURE_PREJIT
#include "zapsig.h"
#endif //FEATURE_PREJIT
#ifdef FEATURE_COMINTEROP
#include "comcallablewrapper.h"
#include "clrtocomcall.h"
#include "runtimecallablewrapper.h"
#include "winrttypenameconverter.h"
#endif // FEATURE_COMINTEROP
#ifdef FEATURE_TYPEEQUIVALENCE
#include "typeequivalencehash.hpp"
#endif
#include "generics.h"
#include "genericdict.h"
#include "typestring.h"
#include "typedesc.h"
#include "array.h"
#ifdef FEATURE_INTERPRETER
#include "interpreter.h"
#endif // FEATURE_INTERPRETER
#ifndef DACCESS_COMPILE
// Typedef for string comparison functions.
typedef int (__cdecl *UTF8StringCompareFuncPtr)(const char *, const char *);
MethodDataCache *MethodTable::s_pMethodDataCache = NULL;
BOOL MethodTable::s_fUseMethodDataCache = FALSE;
BOOL MethodTable::s_fUseParentMethodData = FALSE;
#ifdef _DEBUG
extern unsigned g_dupMethods;
#endif
#endif // !DACCESS_COMPILE
#ifndef DACCESS_COMPILE
//==========================================================================================
class MethodDataCache
{
typedef MethodTable::MethodData MethodData;
public:
static UINT32 GetObjectSize(UINT32 cEntries);
// Ctor. Allocates cEntries entries. Throws.
MethodDataCache(UINT32 cEntries);
MethodData *Find(MethodTable *pMT);
MethodData *Find(MethodTable *pMTDecl, MethodTable *pMTImpl);
void Insert(MethodData *pMData);
void Clear();
protected:
// This describes each entry in the cache.
struct Entry
{
MethodData *m_pMData;
UINT32 m_iTimestamp;
};
MethodData *FindHelper(MethodTable *pMTDecl, MethodTable *pMTImpl, UINT32 idx);
inline UINT32 GetNextTimestamp()
{ return ++m_iCurTimestamp; }
inline UINT32 NumEntries()
{ LIMITED_METHOD_CONTRACT; return m_cEntries; }
inline void TouchEntry(UINT32 i)
{ WRAPPER_NO_CONTRACT; m_iLastTouched = i; GetEntry(i)->m_iTimestamp = GetNextTimestamp(); }
inline UINT32 GetLastTouchedEntryIndex()
{ WRAPPER_NO_CONTRACT; return m_iLastTouched; }
// The end of this object contains an array of Entry
inline Entry *GetEntryData()
{ LIMITED_METHOD_CONTRACT; return (Entry *)(this + 1); }
inline Entry *GetEntry(UINT32 i)
{ WRAPPER_NO_CONTRACT; return GetEntryData() + i; }
private:
// This serializes access to the cache
SimpleRWLock m_lock;
// This allows ageing of entries to decide which to punt when
// inserting a new entry.
UINT32 m_iCurTimestamp;
// The number of entries in the cache
UINT32 m_cEntries;
UINT32 m_iLastTouched;
#ifdef _WIN64
UINT32 pad; // ensures that we are a multiple of 8 bytes
#endif
}; // class MethodDataCache
//==========================================================================================
UINT32 MethodDataCache::GetObjectSize(UINT32 cEntries)
{
LIMITED_METHOD_CONTRACT;
return sizeof(MethodDataCache) + (sizeof(Entry) * cEntries);
}
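// Note: GetEntryData() returns (Entry *)(this + 1), so the Entry array lives
// immediately after the MethodDataCache object itself. A cache is therefore
// presumably allocated with GetObjectSize(cEntries) bytes and constructed in
// place over that block before use.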
//==========================================================================================
MethodDataCache::MethodDataCache(UINT32 cEntries)
: m_lock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT),
m_iCurTimestamp(0),
m_cEntries(cEntries),
m_iLastTouched(0)
{
WRAPPER_NO_CONTRACT;
ZeroMemory(GetEntryData(), cEntries * sizeof(Entry));
}
//==========================================================================================
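// Looks at the entry at index idx and returns its MethodData if it matches the
// (pMTDecl, pMTImpl) pair. When the caller asks for the "self" case (pMTDecl == pMTImpl),
// an entry whose decl side or impl side alone matches can also satisfy the request,
// via GetDeclMethodData() / GetImplMethodData() respectively.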
MethodTable::MethodData *MethodDataCache::FindHelper(
MethodTable *pMTDecl, MethodTable *pMTImpl, UINT32 idx)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
INSTANCE_CHECK;
} CONTRACTL_END;
MethodData *pEntry = GetEntry(idx)->m_pMData;
if (pEntry != NULL) {
MethodTable *pMTDeclEntry = pEntry->GetDeclMethodTable();
MethodTable *pMTImplEntry = pEntry->GetImplMethodTable();
if (pMTDeclEntry == pMTDecl && pMTImplEntry == pMTImpl) {
return pEntry;
}
else if (pMTDecl == pMTImpl) {
if (pMTDeclEntry == pMTDecl) {
return pEntry->GetDeclMethodData();
}
if (pMTImplEntry == pMTDecl) {
return pEntry->GetImplMethodData();
}
}
}
return NULL;
}
//==========================================================================================
MethodTable::MethodData *MethodDataCache::Find(MethodTable *pMTDecl, MethodTable *pMTImpl)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
INSTANCE_CHECK;
} CONTRACTL_END;
#ifdef LOGGING
g_sdStats.m_cCacheLookups++;
#endif
SimpleReadLockHolder lh(&m_lock);
// Check the last touched entry.
MethodData *pEntry = FindHelper(pMTDecl, pMTImpl, GetLastTouchedEntryIndex());
// Now search the entire cache.
if (pEntry == NULL) {
for (UINT32 i = 0; i < NumEntries(); i++) {
pEntry = FindHelper(pMTDecl, pMTImpl, i);
if (pEntry != NULL) {
TouchEntry(i);
break;
}
}
}
if (pEntry != NULL) {
pEntry->AddRef();
}
#ifdef LOGGING
else {
// Failure to find the entry in the cache.
g_sdStats.m_cCacheMisses++;
}
#endif // LOGGING
return pEntry;
}
//==========================================================================================
MethodTable::MethodData *MethodDataCache::Find(MethodTable *pMT)
{
WRAPPER_NO_CONTRACT;
return Find(pMT, pMT);
}
//==========================================================================================
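// Inserts pMData into the cache. The entry with the smallest (oldest) timestamp is
// evicted: its MethodData is released and replaced with pMData, which gets an extra
// reference and a fresh timestamp. Since the cache starts out zero-initialized, the
// empty slots (timestamp 0) fill up first before anything in use is evicted.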
void MethodDataCache::Insert(MethodData *pMData)
{
CONTRACTL {
NOTHROW; // for now, because it does not yet resize.
GC_NOTRIGGER;
INSTANCE_CHECK;
} CONTRACTL_END;
SimpleWriteLockHolder hLock(&m_lock);
UINT32 iMin = UINT32_MAX;
UINT32 idxMin = UINT32_MAX;
for (UINT32 i = 0; i < NumEntries(); i++) {
if (GetEntry(i)->m_iTimestamp < iMin) {
idxMin = i;
iMin = GetEntry(i)->m_iTimestamp;
}
}
Entry *pEntry = GetEntry(idxMin);
if (pEntry->m_pMData != NULL) {
pEntry->m_pMData->Release();
}
pMData->AddRef();
pEntry->m_pMData = pMData;
pEntry->m_iTimestamp = GetNextTimestamp();
}
//==========================================================================================
void MethodDataCache::Clear()
{
CONTRACTL {
NOTHROW; // for now, because it does not yet resize.
GC_NOTRIGGER;
INSTANCE_CHECK;
} CONTRACTL_END;
// Taking the lock here is just a precaution. Really, the runtime
// should be suspended because this is called while unloading an
// AppDomain at the SysSuspendEE stage. But, if someone calls it
// outside of that context, we should be extra cautious.
SimpleWriteLockHolder lh(&m_lock);
for (UINT32 i = 0; i < NumEntries(); i++) {
Entry *pEntry = GetEntry(i);
if (pEntry->m_pMData != NULL) {
pEntry->m_pMData->Release();
}
}
ZeroMemory(GetEntryData(), NumEntries() * sizeof(Entry));
m_iCurTimestamp = 0;
} // MethodDataCache::Clear
#endif // !DACCESS_COMPILE
//==========================================================================================
//
// Initialize the offsets of multipurpose slots at compile time using template metaprogramming
//
template<int N>
struct CountBitsAtCompileTime
{
enum { value = (N & 1) + CountBitsAtCompileTime<(N >> 1)>::value };
};
template<>
struct CountBitsAtCompileTime<0>
{
enum { value = 0 };
};
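// CountBitsAtCompileTime<N>::value is a compile-time population count of N,
// e.g. CountBitsAtCompileTime<0x5>::value == 2 and CountBitsAtCompileTime<0>::value == 0.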
// "mask" is mask of used slots.
template<int mask>
struct MethodTable::MultipurposeSlotOffset
{
// This is the raw index of the slot, assigned on a first come first served basis
enum { raw = CountBitsAtCompileTime<mask>::value };
// This is the actual index of the slot. It is equal to the raw index except for the case
// where the first fixed slot is not used, but the second one is. The first fixed
// slot has to be assigned instead of the second one in this case. This assumes that
// there are exactly two fixed slots.
enum { index = (((mask & 3) == 2) && (raw == 1)) ? 0 : raw };
// Offset of slot
enum { slotOffset = (index == 0) ? offsetof(MethodTable, m_pMultipurposeSlot1) :
(index == 1) ? offsetof(MethodTable, m_pMultipurposeSlot2) :
(sizeof(MethodTable) + index * sizeof(TADDR) - 2 * sizeof(TADDR)) };
// Size of methodtable with overflow slots. It is used to compute start offset of optional members.
enum { totalSize = (slotOffset >= sizeof(MethodTable)) ? slotOffset : sizeof(MethodTable) };
};
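// Worked example: with mask == 0x2 one slot (the second fixed one) is already in use, so
// raw == 1; because the first fixed slot is still free ((mask & 3) == 2), the new slot is
// assigned index == 0, i.e. offsetof(MethodTable, m_pMultipurposeSlot1). With mask == 0x3
// both fixed slots are taken, so raw == index == 2 and the slot lands at the overflow
// offset sizeof(MethodTable) per the arithmetic above.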
//
// These macros recursively expand to create 2^N values for the offset arrays
//
#define MULTIPURPOSE_SLOT_OFFSET_1(mask) MULTIPURPOSE_SLOT_OFFSET (mask) MULTIPURPOSE_SLOT_OFFSET (mask | 0x01)
#define MULTIPURPOSE_SLOT_OFFSET_2(mask) MULTIPURPOSE_SLOT_OFFSET_1(mask) MULTIPURPOSE_SLOT_OFFSET_1(mask | 0x02)
#define MULTIPURPOSE_SLOT_OFFSET_3(mask) MULTIPURPOSE_SLOT_OFFSET_2(mask) MULTIPURPOSE_SLOT_OFFSET_2(mask | 0x04)
#define MULTIPURPOSE_SLOT_OFFSET_4(mask) MULTIPURPOSE_SLOT_OFFSET_3(mask) MULTIPURPOSE_SLOT_OFFSET_3(mask | 0x08)
#define MULTIPURPOSE_SLOT_OFFSET_5(mask) MULTIPURPOSE_SLOT_OFFSET_4(mask) MULTIPURPOSE_SLOT_OFFSET_4(mask | 0x10)
#define MULTIPURPOSE_SLOT_OFFSET(mask) MultipurposeSlotOffset<mask>::slotOffset,
const BYTE MethodTable::c_DispatchMapSlotOffsets[] = {
MULTIPURPOSE_SLOT_OFFSET_2(0)
};
const BYTE MethodTable::c_NonVirtualSlotsOffsets[] = {
MULTIPURPOSE_SLOT_OFFSET_3(0)
};
const BYTE MethodTable::c_ModuleOverrideOffsets[] = {
MULTIPURPOSE_SLOT_OFFSET_4(0)
};
#undef MULTIPURPOSE_SLOT_OFFSET
#define MULTIPURPOSE_SLOT_OFFSET(mask) MultipurposeSlotOffset<mask>::totalSize,
const BYTE MethodTable::c_OptionalMembersStartOffsets[] = {
MULTIPURPOSE_SLOT_OFFSET_5(0)
};
#undef MULTIPURPOSE_SLOT_OFFSET
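// The expansions above produce 4, 8, 16, and 32 entries respectively (2^N entries for
// MULTIPURPOSE_SLOT_OFFSET_N), so each table can be indexed directly by the combination
// of multipurpose-slot flags that are set on a particular MethodTable.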
//==========================================================================================
// Optimization intended for MethodTable::GetModule, MethodTable::GetDispatchMap and MethodTable::GetNonVirtualSlotsPtr
#include <optsmallperfcritical.h>
PTR_Module MethodTable::GetModule()
{
LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogMethodTableAccess(this);
// Fast path for non-generic non-array case
if ((m_dwFlags & (enum_flag_HasComponentSize | enum_flag_GenericsMask)) == 0)
return GetLoaderModule();
MethodTable * pMTForModule = IsArray() ? this : GetCanonicalMethodTable();
if (!pMTForModule->HasModuleOverride())
return pMTForModule->GetLoaderModule();
TADDR pSlot = pMTForModule->GetMultipurposeSlotPtr(enum_flag_HasModuleOverride, c_ModuleOverrideOffsets);
return RelativeFixupPointer<PTR_Module>::GetValueAtPtr(pSlot);
}
//==========================================================================================
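// Same lookup as GetModule() above, minus the IBC access logging.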
PTR_Module MethodTable::GetModule_NoLogging()
{
LIMITED_METHOD_DAC_CONTRACT;
// Fast path for non-generic non-array case
if ((m_dwFlags & (enum_flag_HasComponentSize | enum_flag_GenericsMask)) == 0)
return GetLoaderModule();
MethodTable * pMTForModule = IsArray() ? this : GetCanonicalMethodTable();
if (!pMTForModule->HasModuleOverride())
return pMTForModule->GetLoaderModule();
TADDR pSlot = pMTForModule->GetMultipurposeSlotPtr(enum_flag_HasModuleOverride, c_ModuleOverrideOffsets);
return RelativeFixupPointer<PTR_Module>::GetValueAtPtr(pSlot);
}
//==========================================================================================
PTR_DispatchMap MethodTable::GetDispatchMap()
{
LIMITED_METHOD_DAC_CONTRACT;
MethodTable * pMT = this;
if (!pMT->HasDispatchMapSlot())
{
pMT = pMT->GetCanonicalMethodTable();
if (!pMT->HasDispatchMapSlot())
return NULL;
}
g_IBCLogger.LogDispatchMapAccess(pMT);
TADDR pSlot = pMT->GetMultipurposeSlotPtr(enum_flag_HasDispatchMapSlot, c_DispatchMapSlotOffsets);
return RelativePointer<PTR_DispatchMap>::GetValueAtPtr(pSlot);
}
//==========================================================================================
TADDR MethodTable::GetNonVirtualSlotsPtr()
{
LIMITED_METHOD_DAC_CONTRACT;
_ASSERTE(GetFlag(enum_flag_HasNonVirtualSlots));
return GetMultipurposeSlotPtr(enum_flag_HasNonVirtualSlots, c_NonVirtualSlotsOffsets);
}
#include <optdefault.h>
//==========================================================================================
PTR_Module MethodTable::GetModuleIfLoaded()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
SUPPORTS_DAC;
}
CONTRACTL_END;
g_IBCLogger.LogMethodTableAccess(this);
MethodTable * pMTForModule = IsArray() ? this : GetCanonicalMethodTable();
if (!pMTForModule->HasModuleOverride())
return pMTForModule->GetLoaderModule();
return Module::RestoreModulePointerIfLoaded(pMTForModule->GetModuleOverridePtr(), pMTForModule->GetLoaderModule());
}
#ifndef DACCESS_COMPILE
//==========================================================================================
void MethodTable::SetModule(Module * pModule)
{
LIMITED_METHOD_CONTRACT;
if (HasModuleOverride())
{
GetModuleOverridePtr()->SetValue(pModule);
}
_ASSERTE(GetModule() == pModule);
}
#endif // DACCESS_COMPILE
//==========================================================================================
BOOL MethodTable::ValidateWithPossibleAV()
{
CANNOT_HAVE_CONTRACT;
SUPPORTS_DAC;
// MethodTables have the canonicalization property below.
// i.e. canonicalize, then canonicalize again, and check that the results are
// the same. This is a property that holds for every single valid object in
// the system, but which should hold for very few other addresses.
// For non-generic classes, we can rely on comparing
// object->methodtable->class->methodtable
// to
// object->methodtable
//
// However, for generic instantiation this does not work. There we must
// compare
//
// object->methodtable->class->methodtable->class
// to
// object->methodtable->class
//
// Of course, that's not necessarily enough to verify that the method
// table and class are absolutely valid - we rely on type soundness
// for that. We need to do more sanity checking to
// make sure that our pointer here is in fact a valid object.
PTR_EEClass pEEClass = this->GetClassWithPossibleAV();
return ((this == pEEClass->GetMethodTableWithPossibleAV()) ||
((HasInstantiation() || IsArray()) &&
(pEEClass->GetMethodTableWithPossibleAV()->GetClassWithPossibleAV() == pEEClass)));
}
#ifndef DACCESS_COMPILE
//==========================================================================================
BOOL MethodTable::IsClassInited(AppDomain* pAppDomain /* = NULL */)
{
WRAPPER_NO_CONTRACT;
if (IsClassPreInited())
return TRUE;
if (IsSharedByGenericInstantiations())
return FALSE;
DomainLocalModule *pLocalModule;
if (pAppDomain == NULL)
{
pLocalModule = GetDomainLocalModule();
}
else
{
pLocalModule = GetDomainLocalModule(pAppDomain);
}
_ASSERTE(pLocalModule != NULL);
return pLocalModule->IsClassInitialized(this);
}
//==========================================================================================
BOOL MethodTable::IsInitError()
{
WRAPPER_NO_CONTRACT;
DomainLocalModule *pLocalModule = GetDomainLocalModule();
_ASSERTE(pLocalModule != NULL);
return pLocalModule->IsClassInitError(this);
}
//==========================================================================================
// mark the class as having its .cctor run
void MethodTable::SetClassInited()
{
WRAPPER_NO_CONTRACT;
_ASSERTE(!IsClassPreInited() || MscorlibBinder::IsClass(this, CLASS__SHARED_STATICS));
GetDomainLocalModule()->SetClassInitialized(this);
}
//==========================================================================================
void MethodTable::SetClassInitError()
{
WRAPPER_NO_CONTRACT;
GetDomainLocalModule()->SetClassInitError(this);
}
//==========================================================================================
// mark the class as having been restored.
void MethodTable::SetIsRestored()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END
PRECONDITION(!IsFullyLoaded());
// If functions on this type have already been requested for rejit, then give the rejit
// manager a chance to jump-stamp the code we are implicitly restoring. This ensures the
// first thread entering the function will jump to the prestub and trigger the
// rejit. Note that the PublishMethodTableHolder may take a lock to avoid a rejit race.
// See code:ReJitManager::PublishMethodHolder::PublishMethodHolder#PublishCode
// for details on the race.
//
{
PublishMethodTableHolder(this);
FastInterlockAnd(EnsureWritablePages(&(GetWriteableDataForWrite()->m_dwFlags)), ~MethodTableWriteableData::enum_flag_Unrestored);
}
#ifndef DACCESS_COMPILE
if (ETW_PROVIDER_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER))
{
ETW::MethodLog::MethodTableRestored(this);
}
#endif
}
//==========================================================================================
// mark as COM object type (System.__ComObject and types deriving from it)
void MethodTable::SetComObjectType()
{
LIMITED_METHOD_CONTRACT;
SetFlag(enum_flag_ComObject);
}
#if defined(FEATURE_TYPEEQUIVALENCE)
void MethodTable::SetHasTypeEquivalence()
{
LIMITED_METHOD_CONTRACT;
SetFlag(enum_flag_HasTypeEquivalence);
}
#endif
#ifdef FEATURE_ICASTABLE
void MethodTable::SetICastable()
{
LIMITED_METHOD_CONTRACT;
SetFlag(enum_flag_ICastable);
}
#endif
BOOL MethodTable::IsICastable()
{
LIMITED_METHOD_DAC_CONTRACT;
#ifdef FEATURE_ICASTABLE
return GetFlag(enum_flag_ICastable);
#else
return FALSE;
#endif
}
#endif // !DACCESS_COMPILE
//==========================================================================================
WORD MethodTable::GetNumMethods()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetClass()->GetNumMethods();
}
//==========================================================================================
PTR_BaseDomain MethodTable::GetDomain()
{
LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogMethodTableAccess(this);
return GetLoaderModule()->GetDomain();
}
//==========================================================================================
BOOL MethodTable::IsDomainNeutral()
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_SUPPORTS_DAC;
BOOL ret = GetLoaderModule()->GetAssembly()->IsDomainNeutral();
#ifndef DACCESS_COMPILE
_ASSERTE(!ret == !GetLoaderAllocator()->IsDomainNeutral());
#endif
return ret;
}
//==========================================================================================
BOOL MethodTable::HasSameTypeDefAs(MethodTable *pMT)
{
LIMITED_METHOD_DAC_CONTRACT;
if (this == pMT)
return TRUE;
// optimize for the negative case where we expect RID mismatch
if (GetTypeDefRid() != pMT->GetTypeDefRid())
return FALSE;
if (GetCanonicalMethodTable() == pMT->GetCanonicalMethodTable())
return TRUE;
return (GetModule() == pMT->GetModule());
}
//==========================================================================================
BOOL MethodTable::HasSameTypeDefAs_NoLogging(MethodTable *pMT)
{
LIMITED_METHOD_DAC_CONTRACT;
if (this == pMT)
return TRUE;
// optimize for the negative case where we expect RID mismatch
if (GetTypeDefRid_NoLogging() != pMT->GetTypeDefRid_NoLogging())
return FALSE;
if (GetCanonicalMethodTable() == pMT->GetCanonicalMethodTable())
return TRUE;
return (GetModule_NoLogging() == pMT->GetModule_NoLogging());
}
#ifndef DACCESS_COMPILE
//==========================================================================================
PTR_MethodTable InterfaceInfo_t::GetApproxMethodTable(Module * pContainingModule)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
#ifdef FEATURE_PREJIT
if (m_pMethodTable.IsTagged())
{
// Ideally, we would use Module::RestoreMethodTablePointer here. Unfortunately, it is not
// possible because of the current type loader architecture that restores types incrementally
// even in the NGen case.
MethodTable * pItfMT = *(m_pMethodTable.GetValuePtr());
// Restore the method table, but do not write it back if it has instantiation. We do not want
// to write back the approximate instantiations.
Module::RestoreMethodTablePointerRaw(&pItfMT, pContainingModule, CLASS_LOAD_APPROXPARENTS);
if (!pItfMT->HasInstantiation())
{
// m_pMethodTable.SetValue() is not used here since we want to update the indirection cell
*EnsureWritablePages(m_pMethodTable.GetValuePtr()) = pItfMT;
}
return pItfMT;
}
#endif
MethodTable * pItfMT = m_pMethodTable.GetValue();
ClassLoader::EnsureLoaded(TypeHandle(pItfMT), CLASS_LOAD_APPROXPARENTS);
return pItfMT;
}
#ifndef CROSSGEN_COMPILE
//==========================================================================================
// get the method desc given the interface method desc
/* static */ MethodDesc *MethodTable::GetMethodDescForInterfaceMethodAndServer(
TypeHandle ownerType, MethodDesc *pItfMD, OBJECTREF *pServer)
{
CONTRACT(MethodDesc*)
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pItfMD));
PRECONDITION(pItfMD->IsInterface());
PRECONDITION(!ownerType.IsNull());
PRECONDITION(ownerType.GetMethodTable()->HasSameTypeDefAs(pItfMD->GetMethodTable()));
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
VALIDATEOBJECTREF(*pServer);
#ifdef _DEBUG
MethodTable * pItfMT = ownerType.GetMethodTable();
PREFIX_ASSUME(pItfMT != NULL);
#endif // _DEBUG
MethodTable *pServerMT = (*pServer)->GetMethodTable();
PREFIX_ASSUME(pServerMT != NULL);
if (pServerMT->IsTransparentProxy())
{
// If pServer is a TP, then the interface method desc is the one to
// use to dispatch the call.
RETURN(pItfMD);
}
#ifdef FEATURE_ICASTABLE
// In the case of ICastable, instead of trying to find the method implementation in the real object type
// we call pObj.GetValueInternal() and call GetMethodDescForInterfaceMethod() again with whatever type it returns.
// This allows objects that implement ICastable to mimic the behavior of other types.
if (pServerMT->IsICastable() &&
!pItfMD->HasMethodInstantiation() &&
!TypeHandle(pServerMT).CanCastTo(ownerType)) // we need to make sure object doesn't implement this interface in a natural way
{
GCStress<cfg_any>::MaybeTrigger();
// Make call to ICastableHelpers.GetImplType(obj, interfaceTypeObj)
PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICASTABLEHELPERS__GETIMPLTYPE);
OBJECTREF ownerManagedType = ownerType.GetManagedClassObject(); //GC triggers
DECLARE_ARGHOLDER_ARRAY(args, 2);
args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(*pServer);
args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(ownerManagedType);
OBJECTREF impTypeObj = NULL;
CALL_MANAGED_METHOD_RETREF(impTypeObj, OBJECTREF, args);
INDEBUG(ownerManagedType = NULL); //ownerManagedType wasn't protected during the call
if (impTypeObj == NULL) // GetImplType returns default(RuntimeTypeHandle)
{
COMPlusThrow(kEntryPointNotFoundException);
}
ReflectClassBaseObject* resultTypeObj = ((ReflectClassBaseObject*)OBJECTREFToObject(impTypeObj));
TypeHandle resulTypeHnd = resultTypeObj->GetType();
MethodTable *pResultMT = resulTypeHnd.GetMethodTable();
RETURN(pResultMT->GetMethodDescForInterfaceMethod(ownerType, pItfMD));
}
#endif
#ifdef FEATURE_COMINTEROP
if (pServerMT->IsComObjectType() && !pItfMD->HasMethodInstantiation())
{
// interop needs an exact MethodDesc
pItfMD = MethodDesc::FindOrCreateAssociatedMethodDesc(
pItfMD,
ownerType.GetMethodTable(),
FALSE, // forceBoxedEntryPoint
Instantiation(), // methodInst
FALSE, // allowInstParam
TRUE); // forceRemotableMethod
RETURN(pServerMT->GetMethodDescForComInterfaceMethod(pItfMD, false));
}
#endif // FEATURE_COMINTEROP
// Handle pure COM+ types.
RETURN (pServerMT->GetMethodDescForInterfaceMethod(ownerType, pItfMD));
}
#ifdef FEATURE_COMINTEROP
//==========================================================================================
// get the method desc given the interface method desc on a COM implemented server
// (if fNullOk is set then NULL is an allowable return value)
MethodDesc *MethodTable::GetMethodDescForComInterfaceMethod(MethodDesc *pItfMD, bool fNullOk)
{
CONTRACT(MethodDesc*)
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pItfMD));
PRECONDITION(pItfMD->IsInterface());
PRECONDITION(IsComObjectType());
POSTCONDITION(fNullOk || CheckPointer(RETVAL));
}
CONTRACT_END;
MethodTable * pItfMT = pItfMD->GetMethodTable();
PREFIX_ASSUME(pItfMT != NULL);
// We now handle the __ComObject class that doesn't have a Dynamic Interface Map
if (!HasDynamicInterfaceMap())
{
RETURN(pItfMD);
}
else
{
// Now we handle the more complex extensible RCWs. The first thing to do is check
// to see if the static definition of the extensible RCW specifies that the class
// implements the interface.
DWORD slot = (DWORD) -1;
// Calling GetTarget here instead of FindDispatchImpl gives us caching functionality to increase speed.
PCODE tgt = VirtualCallStubManager::GetTarget(
pItfMT->GetLoaderAllocator()->GetDispatchToken(pItfMT->GetTypeID(), pItfMD->GetSlot()), this);
if (tgt != NULL)
{
RETURN(MethodTable::GetMethodDescForSlotAddress(tgt));
}
// The interface is not in the static class definition so we need to look at the
// dynamic interfaces.
else if (FindDynamicallyAddedInterface(pItfMT))
{
// This interface was added to the class dynamically so it is implemented
// by the COM object. We treat these dynamically added interfaces the same
// way we treat COM objects: that is, by using the interface vtable.
RETURN(pItfMD);
}
else
{
RETURN(NULL);
}
}
}
#endif // FEATURE_COMINTEROP
#endif // CROSSGEN_COMPILE
//---------------------------------------------------------------------------------------
//
MethodTable* CreateMinimalMethodTable(Module* pContainingModule,
LoaderHeap* pCreationHeap,
AllocMemTracker* pamTracker)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END;
EEClass* pClass = EEClass::CreateMinimalClass(pCreationHeap, pamTracker);
LOG((LF_BCL, LL_INFO100, "Level2 - Creating MethodTable {0x%p}...\n", pClass));
MethodTable* pMT = (MethodTable *)(void *)pamTracker->Track(pCreationHeap->AllocMem(S_SIZE_T(sizeof(MethodTable))));
// Note: Memory allocated on loader heap is zero filled
// memset(pMT, 0, sizeof(MethodTable));
// Allocate the private data block ("private" during runtime in the ngen'ed case).
BYTE* pMTWriteableData = (BYTE *)
pamTracker->Track(pCreationHeap->AllocMem(S_SIZE_T(sizeof(MethodTableWriteableData))));
pMT->SetWriteableData((PTR_MethodTableWriteableData)pMTWriteableData);
//
// Set up the EEClass
//
pClass->SetMethodTable(pMT); // in the EEClass set the pointer to this MethodTable
pClass->SetAttrClass(tdPublic | tdSealed);
//
// Set up the MethodTable
//
// Does not need parent. Note that MethodTable for COR_GLOBAL_PARENT_TOKEN does not have parent either,
// so the system has to be wired for dealing with no parent anyway.
pMT->SetParentMethodTable(NULL);
pMT->SetClass(pClass);
pMT->SetLoaderModule(pContainingModule);
pMT->SetLoaderAllocator(pContainingModule->GetLoaderAllocator());
pMT->SetInternalCorElementType(ELEMENT_TYPE_CLASS);
pMT->SetBaseSize(OBJECT_BASESIZE);
#ifdef _DEBUG
pClass->SetDebugClassName("dynamicClass");
pMT->SetDebugClassName("dynamicClass");
#endif
LOG((LF_BCL, LL_INFO10, "Level1 - MethodTable created {0x%p}\n", pClass));
return pMT;
}
#ifdef FEATURE_COMINTEROP
#ifndef CROSSGEN_COMPILE
//==========================================================================================
OBJECTREF MethodTable::GetObjCreateDelegate()
{
CONTRACTL
{
MODE_COOPERATIVE;
GC_NOTRIGGER;
NOTHROW;
}
CONTRACTL_END;
_ASSERT(!IsInterface());
if (GetOHDelegate())
return ObjectFromHandle(GetOHDelegate());
else
return NULL;
}
//==========================================================================================
void MethodTable::SetObjCreateDelegate(OBJECTREF orDelegate)
{
CONTRACTL
{
MODE_COOPERATIVE;
GC_NOTRIGGER;
THROWS; // From CreateHandle
}
CONTRACTL_END;
if (GetOHDelegate())
StoreObjectInHandle(GetOHDelegate(), orDelegate);
else
SetOHDelegate (GetAppDomain()->CreateHandle(orDelegate));
}
#endif //CROSSGEN_COMPILE
#endif // FEATURE_COMINTEROP
//==========================================================================================
void MethodTable::SetInterfaceMap(WORD wNumInterfaces, InterfaceInfo_t* iMap)
{
LIMITED_METHOD_CONTRACT;
if (wNumInterfaces == 0)
{
_ASSERTE(!HasInterfaceMap());
return;
}
m_wNumInterfaces = wNumInterfaces;
CONSISTENCY_CHECK(IS_ALIGNED(iMap, sizeof(void*)));
m_pInterfaceMap.SetValue(iMap);
}
//==========================================================================================
// Called after GetExtraInterfaceInfoSize above to set up a new MethodTable with the additional memory to track
// extra interface info. If a non-zero number of interfaces is implemented on this class but
// GetExtraInterfaceInfoSize() returned zero, this call must still be made (with a NULL argument).
void MethodTable::InitializeExtraInterfaceInfo(PVOID pInfo)
{
STANDARD_VM_CONTRACT;
// Check that memory was allocated or not allocated in the right scenarios.
_ASSERTE(((pInfo == NULL) && (GetExtraInterfaceInfoSize(GetNumInterfaces()) == 0)) ||
((pInfo != NULL) && (GetExtraInterfaceInfoSize(GetNumInterfaces()) != 0)));
// This call is a no-op if we don't require extra interface info (in which case a buffer should never have
// been allocated).
if (!HasExtraInterfaceInfo())
{
_ASSERTE(pInfo == NULL);
return;
}
// Get pointer to optional slot that holds either a small inlined bitmap of flags or the pointer to a
// larger bitmap.
PTR_TADDR pInfoSlot = GetExtraInterfaceInfoPtr();
// In either case, data inlined or held in an external buffer, the correct thing to do is to write pInfo
// to the slot. In the inlined case we wish to set all flags to their default value (zero, false) and
// writing NULL does that. Otherwise we simply want to dump the buffer pointer directly into the slot (no
// need for a discriminator bit, we can always infer which format we're using based on the interface
// count).
*pInfoSlot = (TADDR)pInfo;
// There shouldn't be any need for further initialization in the buffered case since loader heap
// allocation zeroes data.
#ifdef _DEBUG
if (pInfo != NULL)
for (DWORD i = 0; i < GetExtraInterfaceInfoSize(GetNumInterfaces()); i++)
_ASSERTE(*((BYTE*)pInfo + i) == 0);
#endif // _DEBUG
}
#ifdef FEATURE_NATIVE_IMAGE_GENERATION
// Ngen support.
void MethodTable::SaveExtraInterfaceInfo(DataImage *pImage)
{
STANDARD_VM_CONTRACT;
// No extra data to save if the number of interfaces is below the threshold -- there is either no data or
// it all fits into the optional members inline.
if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
return;
pImage->StoreStructure((LPVOID)*GetExtraInterfaceInfoPtr(),
GetExtraInterfaceInfoSize(GetNumInterfaces()),
DataImage::ITEM_INTERFACE_MAP);
}
void MethodTable::FixupExtraInterfaceInfo(DataImage *pImage)
{
STANDARD_VM_CONTRACT;
// No pointer to extra data to fixup if the number of interfaces is below the threshold -- there is
// either no data or it all fits into the optional members inline.
if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
return;
pImage->FixupPointerField(this, (BYTE*)GetExtraInterfaceInfoPtr() - (BYTE*)this);
}
#endif // FEATURE_NATIVE_IMAGE_GENERATION
// Define a macro that generates a mask for a given bit in a TADDR correctly on either 32 or 64 bit platforms.
#ifdef _WIN64
#define SELECT_TADDR_BIT(_index) (1ULL << (_index))
#else
#define SELECT_TADDR_BIT(_index) (1U << (_index))
#endif
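// SELECT_TADDR_BIT(i) produces a TADDR with only bit i set (e.g. SELECT_TADDR_BIT(6) == 0x40);
// it is used below to address bits both in the inline bitmap and in the out-of-line TADDR array.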
//==========================================================================================
// For the given interface in the map (specified via map index) mark the interface as declared explicitly on
// this class. This is not legal for dynamically added interfaces (as used by RCWs).
void MethodTable::SetInterfaceDeclaredOnClass(DWORD index)
{
STANDARD_VM_CONTRACT;
_ASSERTE(HasExtraInterfaceInfo());
_ASSERTE(index < GetNumInterfaces());
// Get address of optional slot for extra info.
PTR_TADDR pInfoSlot = GetExtraInterfaceInfoPtr();
if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
{
// Bitmap of flags is stored inline in the optional slot.
*pInfoSlot |= SELECT_TADDR_BIT(index);
}
else
{
// Slot points to a buffer containing a larger bitmap.
TADDR *pBitmap = (PTR_TADDR)*pInfoSlot;
DWORD idxTaddr = index / (sizeof(TADDR) * 8); // Select TADDR in array that covers the target bit
DWORD idxInTaddr = index % (sizeof(TADDR) * 8);
TADDR bitmask = SELECT_TADDR_BIT(idxInTaddr);
pBitmap[idxTaddr] |= bitmask;
_ASSERTE((pBitmap[idxTaddr] & bitmask) == bitmask);
}
}
//==========================================================================================
// For the given interface return true if the interface was declared explicitly on this class.
bool MethodTable::IsInterfaceDeclaredOnClass(DWORD index)
{
STANDARD_VM_CONTRACT;
_ASSERTE(HasExtraInterfaceInfo());
// Dynamic interfaces are always marked as not DeclaredOnClass (I don't know why but this is how the code
// was originally authored).
if (index >= GetNumInterfaces())
{
#ifdef FEATURE_COMINTEROP
_ASSERTE(HasDynamicInterfaceMap());
#endif // FEATURE_COMINTEROP
return false;
}
// Get data from the optional extra info slot.
TADDR taddrInfo = *GetExtraInterfaceInfoPtr();
if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
{
// Bitmap of flags is stored directly in the value.
return (taddrInfo & SELECT_TADDR_BIT(index)) != 0;
}
else
{
// Slot points to a buffer containing a larger bitmap.
TADDR *pBitmap = (PTR_TADDR)taddrInfo;
DWORD idxTaddr = index / (sizeof(TADDR) * 8); // Select TADDR in array that covers the target bit
DWORD idxInTaddr = index % (sizeof(TADDR) * 8);
TADDR bitmask = SELECT_TADDR_BIT(idxInTaddr);
return (pBitmap[idxTaddr] & bitmask) != 0;
}
}
#ifdef FEATURE_COMINTEROP
//==========================================================================================
PTR_InterfaceInfo MethodTable::GetDynamicallyAddedInterfaceMap()
{
LIMITED_METHOD_DAC_CONTRACT;
PRECONDITION(HasDynamicInterfaceMap());
return GetInterfaceMap() + GetNumInterfaces();
}
//==========================================================================================
unsigned MethodTable::GetNumDynamicallyAddedInterfaces()
{
LIMITED_METHOD_DAC_CONTRACT;
PRECONDITION(HasDynamicInterfaceMap());
PTR_InterfaceInfo pInterfaces = GetInterfaceMap();
PREFIX_ASSUME(pInterfaces != NULL);
return (unsigned)*(dac_cast<PTR_SIZE_T>(pInterfaces) - 1);
}
//==========================================================================================
BOOL MethodTable::FindDynamicallyAddedInterface(MethodTable *pInterface)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(IsRestored_NoLogging());
_ASSERTE(HasDynamicInterfaceMap()); // This should never be called on for a type that is not an extensible RCW.
unsigned cDynInterfaces = GetNumDynamicallyAddedInterfaces();
InterfaceInfo_t *pDynItfMap = GetDynamicallyAddedInterfaceMap();
for (unsigned i = 0; i < cDynInterfaces; i++)
{
if (pDynItfMap[i].GetMethodTable() == pInterface)
return TRUE;
}
return FALSE;
}
//==========================================================================================
void MethodTable::AddDynamicInterface(MethodTable *pItfMT)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(IsRestored_NoLogging());
PRECONDITION(HasDynamicInterfaceMap()); // This should never be called on for a type that is not an extensible RCW.
}
CONTRACTL_END;
unsigned NumDynAddedInterfaces = GetNumDynamicallyAddedInterfaces();
unsigned TotalNumInterfaces = GetNumInterfaces() + NumDynAddedInterfaces;
InterfaceInfo_t *pNewItfMap = NULL;
S_SIZE_T AllocSize = (S_SIZE_T(S_UINT32(TotalNumInterfaces) + S_UINT32(1)) * S_SIZE_T(sizeof(InterfaceInfo_t))) + S_SIZE_T(sizeof(DWORD_PTR));
if (AllocSize.IsOverflow())
ThrowHR(COR_E_OVERFLOW);
// Allocate the new interface table, adding one entry for the new interface and one
// more for the dummy slot before the start of the table.
pNewItfMap = (InterfaceInfo_t*)(void*)GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(AllocSize);
pNewItfMap = (InterfaceInfo_t*)(((BYTE *)pNewItfMap) + sizeof(DWORD_PTR));
// Copy the old map into the new one.
if (TotalNumInterfaces > 0) {
InterfaceInfo_t *pInterfaceMap = GetInterfaceMap();
PREFIX_ASSUME(pInterfaceMap != NULL);
for (unsigned index = 0; index < TotalNumInterfaces; ++index)
{
InterfaceInfo_t *pIntInfo = (InterfaceInfo_t *) (pNewItfMap + index);
pIntInfo->SetMethodTable((pInterfaceMap + index)->GetMethodTable());
}
}
// Add the new interface at the end of the map.
pNewItfMap[TotalNumInterfaces].SetMethodTable(pItfMT);
// Update the count of dynamically added interfaces.
*(((DWORD_PTR *)pNewItfMap) - 1) = NumDynAddedInterfaces + 1;
// Switch the old interface map with the new one.
EnsureWritablePages(&m_pInterfaceMap);
m_pInterfaceMap.SetValueVolatile(pNewItfMap);
// Log the fact that we leaked the interface vtable map.
#ifdef _DEBUG
LOG((LF_INTEROP, LL_EVERYTHING,
"Extensible RCW %s being cast to interface %s caused an interface vtable map leak",
GetClass()->GetDebugClassName(), pItfMT->GetClass()->m_szDebugClassName));
#else // !_DEBUG
LOG((LF_INTEROP, LL_EVERYTHING,
"Extensible RCW being cast to an interface caused an interface vtable map leak"));
#endif // !_DEBUG
} // MethodTable::AddDynamicInterface
#endif // FEATURE_COMINTEROP
void MethodTable::SetupGenericsStaticsInfo(FieldDesc* pStaticFieldDescs)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
// No need to generate IDs for open types. Indeed, since we don't save them
// in the NGEN image, it would be actively incorrect to do so. However,
// we still leave the optional member in the MethodTable holding the value -1 for the ID.
GenericsStaticsInfo *pInfo = GetGenericsStaticsInfo();
if (!ContainsGenericVariables() && !IsSharedByGenericInstantiations())
{
Module * pModuleForStatics = GetLoaderModule();
pInfo->m_DynamicTypeID = pModuleForStatics->AllocateDynamicEntry(this);
}
else
{
pInfo->m_DynamicTypeID = (SIZE_T)-1;
}
pInfo->m_pFieldDescs.SetValueMaybeNull(pStaticFieldDescs);
}
#endif // !DACCESS_COMPILE
//==========================================================================================
// Calculate how many bytes of storage will be required to track additional information for interfaces. This
// will be zero if there are no interfaces, but can also be zero for small numbers of interfaces, and
// callers should be ready to handle this.
/* static */ SIZE_T MethodTable::GetExtraInterfaceInfoSize(DWORD cInterfaces)
{
LIMITED_METHOD_DAC_CONTRACT;
// For small numbers of interfaces we can record the info in the TADDR of the optional member itself (use
// the TADDR as a bitmap).
if (cInterfaces <= kInlinedInterfaceInfoThreshhold)
return 0;
// Otherwise we'll cause an array of TADDRs to be allocated (use TADDRs since the heap space allocated
// will almost certainly need to be TADDR aligned anyway).
return ALIGN_UP(cInterfaces, sizeof(TADDR) * 8) / 8;
}
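// Example: on a 64-bit platform a type with 65 interfaces cannot use the inline bitmap,
// so this returns ALIGN_UP(65, 64) / 8 == 16 bytes, i.e. two TADDR-sized bitmap words.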
#ifdef DACCESS_COMPILE
//==========================================================================================
void MethodTable::EnumMemoryRegionsForExtraInterfaceInfo()
{
SUPPORTS_DAC;
// No extra data to enum if the number of interfaces is below the threshold -- there is either no data or
// it all fits into the optional members inline.
if (GetNumInterfaces() <= kInlinedInterfaceInfoThreshhold)
return;
DacEnumMemoryRegion(*GetExtraInterfaceInfoPtr(), GetExtraInterfaceInfoSize(GetNumInterfaces()));
}
#endif // DACCESS_COMPILE
//==========================================================================================
Module* MethodTable::GetModuleForStatics()
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
g_IBCLogger.LogMethodTableAccess(this);
if (HasGenericsStaticsInfo())
{
DWORD dwDynamicClassDomainID;
return GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
}
else
{
return GetLoaderModule();
}
}
//==========================================================================================
DWORD MethodTable::GetModuleDynamicEntryID()
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
_ASSERTE(IsDynamicStatics() && "Only memory reflection emit types and generics can have a dynamic ID");
if (HasGenericsStaticsInfo())
{
DWORD dwDynamicClassDomainID;
GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
return dwDynamicClassDomainID;
}
else
{
return GetClass()->GetModuleDynamicID();
}
}
#ifndef DACCESS_COMPILE
#ifdef FEATURE_TYPEEQUIVALENCE
//==========================================================================================
// Equivalence based on Guid and TypeIdentifier attributes to support the "no-PIA" feature.
BOOL MethodTable::IsEquivalentTo_Worker(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited))
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
SO_TOLERANT; // we are called from MethodTable::CanCastToClass
}
CONTRACTL_END;
_ASSERTE(HasTypeEquivalence() && pOtherMT->HasTypeEquivalence());
#ifdef _DEBUG
if (TypeHandlePairList::Exists(pVisited, TypeHandle(this), TypeHandle(pOtherMT)))
{
_ASSERTE(!"We are in the process of comparing these types already. That should never happen!");
return TRUE;
}
TypeHandlePairList newVisited(TypeHandle(this), TypeHandle(pOtherMT), pVisited);
#endif
if (HasInstantiation() != pOtherMT->HasInstantiation())
return FALSE;
if (IsArray())
{
if (!pOtherMT->IsArray() || GetRank() != pOtherMT->GetRank())
return FALSE;
// arrays of structures have their own unshared MTs and will take this path
return (GetApproxArrayElementTypeHandle().IsEquivalentTo(pOtherMT->GetApproxArrayElementTypeHandle() COMMA_INDEBUG(&newVisited)));
}
BOOL bResult = FALSE;
BEGIN_SO_INTOLERANT_CODE(GetThread());
bResult = IsEquivalentTo_WorkerInner(pOtherMT COMMA_INDEBUG(&newVisited));
END_SO_INTOLERANT_CODE;
return bResult;
}
//==========================================================================================
// Type equivalence - SO intolerant part.
BOOL MethodTable::IsEquivalentTo_WorkerInner(MethodTable *pOtherMT COMMA_INDEBUG(TypeHandlePairList *pVisited))
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
SO_INTOLERANT;
LOADS_TYPE(CLASS_DEPENDENCIES_LOADED);
}
CONTRACTL_END;
AppDomain *pDomain = GetAppDomain();
if (pDomain != NULL)
{
TypeEquivalenceHashTable::EquivalenceMatch match = pDomain->GetTypeEquivalenceCache()->CheckEquivalence(TypeHandle(this), TypeHandle(pOtherMT));
switch (match)
{
case TypeEquivalenceHashTable::Match:
return TRUE;
case TypeEquivalenceHashTable::NoMatch:
return FALSE;
case TypeEquivalenceHashTable::MatchUnknown:
break;
default:
_ASSERTE(FALSE);
break;
}
}
BOOL fEquivalent = FALSE;
if (HasInstantiation())
{
// we limit variance on generics only to interfaces
if (!IsInterface() || !pOtherMT->IsInterface())
{
fEquivalent = FALSE;
goto EquivalenceCalculated;
}
// check whether the instantiations are equivalent
Instantiation inst1 = GetInstantiation();
Instantiation inst2 = pOtherMT->GetInstantiation();
if (inst1.GetNumArgs() != inst2.GetNumArgs())
{
fEquivalent = FALSE;
goto EquivalenceCalculated;
}
for (DWORD i = 0; i < inst1.GetNumArgs(); i++)
{
if (!inst1[i].IsEquivalentTo(inst2[i] COMMA_INDEBUG(pVisited)))
{
fEquivalent = FALSE;
goto EquivalenceCalculated;
}
}
if (GetTypeDefRid() == pOtherMT->GetTypeDefRid() && GetModule() == pOtherMT->GetModule())
{
// it's OK to declare the MTs equivalent at this point; the cases we care
// about are IList<IFoo> and IList<IBar> where IFoo and IBar are equivalent
fEquivalent = TRUE;
}
else
{
fEquivalent = FALSE;
}
goto EquivalenceCalculated;
}
if (IsArray())
{
if (!pOtherMT->IsArray() || GetRank() != pOtherMT->GetRank())
{
fEquivalent = FALSE;
goto EquivalenceCalculated;
}
// arrays of structures have their own unshared MTs and will take this path
fEquivalent = (GetApproxArrayElementTypeHandle().IsEquivalentTo(pOtherMT->GetApproxArrayElementTypeHandle() COMMA_INDEBUG(pVisited)));
goto EquivalenceCalculated;
}
fEquivalent = CompareTypeDefsForEquivalence(GetCl(), pOtherMT->GetCl(), GetModule(), pOtherMT->GetModule(), NULL);
EquivalenceCalculated:
// Only record equivalence matches if we are in an AppDomain
if (pDomain != NULL)
{
// Collectible type results will not get cached.
if ((!this->Collectible() && !pOtherMT->Collectible()))
{
TypeEquivalenceHashTable::EquivalenceMatch match;
match = fEquivalent ? TypeEquivalenceHashTable::Match : TypeEquivalenceHashTable::NoMatch;
pDomain->GetTypeEquivalenceCache()->RecordEquivalence(TypeHandle(this), TypeHandle(pOtherMT), match);
}
}
return fEquivalent;
}
#endif // FEATURE_TYPEEQUIVALENCE
//==========================================================================================
BOOL MethodTable::CanCastToInterface(MethodTable *pTargetMT, TypeHandlePairList *pVisited)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(pTargetMT->IsInterface());
PRECONDITION(!IsTransparentProxy());
PRECONDITION(IsRestored_NoLogging());
}
CONTRACTL_END
if (!pTargetMT->HasVariance())
{
if (HasTypeEquivalence() || pTargetMT->HasTypeEquivalence())
{
if (IsInterface() && IsEquivalentTo(pTargetMT))
return TRUE;
return ImplementsEquivalentInterface(pTargetMT);
}
return CanCastToNonVariantInterface(pTargetMT);
}
else
{
if (CanCastByVarianceToInterfaceOrDelegate(pTargetMT, pVisited))
return TRUE;
InterfaceMapIterator it = IterateInterfaceMap();
while (it.Next())
{
if (it.GetInterface()->CanCastByVarianceToInterfaceOrDelegate(pTargetMT, pVisited))
return TRUE;
}
}
return FALSE;
}
//==========================================================================================
BOOL MethodTable::CanCastByVarianceToInterfaceOrDelegate(MethodTable *pTargetMT, TypeHandlePairList *pVisited)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(pTargetMT->HasVariance());
PRECONDITION(pTargetMT->IsInterface() || pTargetMT->IsDelegate());
PRECONDITION(IsRestored_NoLogging());
}
CONTRACTL_END
BOOL returnValue = FALSE;
EEClass *pClass = NULL;
TypeHandlePairList pairList(this, pTargetMT, pVisited);
if (TypeHandlePairList::Exists(pVisited, this, pTargetMT))
goto Exit;
if (GetTypeDefRid() != pTargetMT->GetTypeDefRid() || GetModule() != pTargetMT->GetModule())
{
goto Exit;
}
{
pClass = pTargetMT->GetClass();
Instantiation inst = GetInstantiation();
Instantiation targetInst = pTargetMT->GetInstantiation();
for (DWORD i = 0; i < inst.GetNumArgs(); i++)
{
TypeHandle thArg = inst[i];
TypeHandle thTargetArg = targetInst[i];
// If argument types are not equivalent, test them for compatibility
// in accordance with the variance annotation
if (!thArg.IsEquivalentTo(thTargetArg))
{
switch (pClass->GetVarianceOfTypeParameter(i))
{
case gpCovariant :
if (!thArg.IsBoxedAndCanCastTo(thTargetArg, &pairList))
goto Exit;
break;
case gpContravariant :
if (!thTargetArg.IsBoxedAndCanCastTo(thArg, &pairList))
goto Exit;
break;
case gpNonVariant :
goto Exit;
default :
_ASSERTE(!"Illegal variance annotation");
goto Exit;
}
}
}
}
returnValue = TRUE;
Exit:
return returnValue;
}
//==========================================================================================
BOOL MethodTable::CanCastToClass(MethodTable *pTargetMT, TypeHandlePairList *pVisited)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(!pTargetMT->IsArray());
PRECONDITION(!pTargetMT->IsInterface());
}
CONTRACTL_END
MethodTable *pMT = this;
// If the target type has variant type parameters, we take a slower path
if (pTargetMT->HasVariance())
{
// At present, we support variance only on delegates and interfaces
CONSISTENCY_CHECK(pTargetMT->IsDelegate());
// First chase inheritance hierarchy until we hit a class that only differs in its instantiation
do {
// Cheap check for equivalence
if (pMT->IsEquivalentTo(pTargetMT))
return TRUE;
g_IBCLogger.LogMethodTableAccess(pMT);
if (pMT->CanCastByVarianceToInterfaceOrDelegate(pTargetMT, pVisited))
return TRUE;
pMT = pMT->GetParentMethodTable();
} while (pMT);
}
// If there are no variant type parameters, just chase the hierarchy
else
{
do {
if (pMT->IsEquivalentTo(pTargetMT))
return TRUE;
g_IBCLogger.LogMethodTableAccess(pMT);
pMT = pMT->GetParentMethodTable();
} while (pMT);
}
return FALSE;
}
#include <optsmallperfcritical.h>
//==========================================================================================
BOOL MethodTable::CanCastToNonVariantInterface(MethodTable *pTargetMT)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
INSTANCE_CHECK;
SO_TOLERANT;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(pTargetMT->IsInterface());
PRECONDITION(!pTargetMT->HasVariance());
PRECONDITION(!IsTransparentProxy());
PRECONDITION(IsRestored_NoLogging());
}
CONTRACTL_END
// Check to see if the current class is for the interface passed in.
if (this == pTargetMT)
return TRUE;
// Check to see if the static class definition indicates we implement the interface.
return ImplementsInterfaceInline(pTargetMT);
}
//==========================================================================================
TypeHandle::CastResult MethodTable::CanCastToInterfaceNoGC(MethodTable *pTargetMT)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
INSTANCE_CHECK;
SO_TOLERANT;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(pTargetMT->IsInterface());
PRECONDITION(!IsTransparentProxy());
PRECONDITION(IsRestored_NoLogging());
}
CONTRACTL_END
if (!pTargetMT->HasVariance() && !IsArray() && !HasTypeEquivalence() && !pTargetMT->HasTypeEquivalence())
{
return CanCastToNonVariantInterface(pTargetMT) ? TypeHandle::CanCast : TypeHandle::CannotCast;
}
else
{
// We're conservative on variant interfaces and types with equivalence
return TypeHandle::MaybeCast;
}
}
//==========================================================================================
TypeHandle::CastResult MethodTable::CanCastToClassNoGC(MethodTable *pTargetMT)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
INSTANCE_CHECK;
SO_TOLERANT;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(!pTargetMT->IsArray());
PRECONDITION(!pTargetMT->IsInterface());
}
CONTRACTL_END
// We're conservative on variant classes
if (pTargetMT->HasVariance() || g_IBCLogger.InstrEnabled())
{
return TypeHandle::MaybeCast;
}
// Type equivalence needs the slow path
if (HasTypeEquivalence() || pTargetMT->HasTypeEquivalence())
{
return TypeHandle::MaybeCast;
}
// If there are no variant type parameters, just chase the hierarchy
else
{
PTR_VOID pMT = this;
do {
if (pMT == pTargetMT)
return TypeHandle::CanCast;
pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
} while (pMT);
}
return TypeHandle::CannotCast;
}
#include <optdefault.h>
BOOL
MethodTable::IsExternallyVisible()
{
CONTRACTL
{
THROWS;
MODE_ANY;
GC_TRIGGERS;
SO_INTOLERANT;
}
CONTRACTL_END;
BOOL bIsVisible = IsTypeDefExternallyVisible(GetCl(), GetModule(), GetClass()->GetAttrClass());
if (bIsVisible && HasInstantiation() && !IsGenericTypeDefinition())
{
for (COUNT_T i = 0; i < GetNumGenericArgs(); i++)
{
if (!GetInstantiation()[i].IsExternallyVisible())
return FALSE;
}
}
return bIsVisible;
} // MethodTable::IsExternallyVisible
#ifdef FEATURE_PREJIT
BOOL MethodTable::CanShareVtableChunksFrom(MethodTable *pTargetMT, Module *pCurrentLoaderModule, Module *pCurrentPreferredZapModule)
{
WRAPPER_NO_CONTRACT;
// These constraints come from two places:
// 1. A non-zapped MT cannot share with a zapped MT since it may result in SetSlot() on a read-only slot
// 2. Zapping this MT in MethodTable::Save cannot "unshare" something we decide to share now
//
// We could fix both of these and allow non-zapped MTs to share chunks fully by doing the following
// 1. Fix the few dangerous callers of SetSlot to first check whether the chunk itself is zapped
// (see MethodTableBuilder::CopyExactParentSlots, or we could use ExecutionManager::FindZapModule)
// 2. Have this function return FALSE if IsCompilationProcess and rely on MethodTable::Save to do all sharing for the NGen case
return !pTargetMT->IsZapped() &&
pTargetMT->GetLoaderModule() == pCurrentLoaderModule &&
pCurrentLoaderModule == pCurrentPreferredZapModule &&
pCurrentPreferredZapModule == Module::GetPreferredZapModuleForMethodTable(pTargetMT);
}
#else
BOOL MethodTable::CanShareVtableChunksFrom(MethodTable *pTargetMT, Module *pCurrentLoaderModule)
{
WRAPPER_NO_CONTRACT;
return pTargetMT->GetLoaderModule() == pCurrentLoaderModule;
}
#endif
#ifdef _DEBUG
void
MethodTable::DebugDumpVtable(LPCUTF8 szClassName, BOOL fDebug)
{
//diag functions shouldn't affect normal behavior
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
CQuickBytes qb;
const size_t cchBuff = MAX_CLASSNAME_LENGTH + 30;
LPWSTR buff = fDebug ? (LPWSTR) qb.AllocNoThrow(cchBuff * sizeof(WCHAR)) : NULL;
if ((buff == NULL) && fDebug)
{
WszOutputDebugString(W("OOM when dumping VTable - falling back to logging"));
fDebug = FALSE;
}
if (fDebug)
{
swprintf_s(buff, cchBuff, W("Vtable (with interface dupes) for '%S':\n"), szClassName);
#ifdef _DEBUG
swprintf_s(&buff[wcslen(buff)], cchBuff - wcslen(buff) , W(" Total duplicate slots = %d\n"), g_dupMethods);
#endif
WszOutputDebugString(buff);
}
else
{
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS, "Vtable (with interface dupes) for '%s':\n", szClassName));
LOG((LF_ALWAYS, LL_ALWAYS, " Total duplicate slots = %d\n", g_dupMethods));
}
HRESULT hr;
EX_TRY
{
MethodIterator it(this);
for (; it.IsValid(); it.Next())
{
MethodDesc *pMD = it.GetMethodDesc();
LPCUTF8 pszName = pMD->GetName((USHORT) it.GetSlotNumber());
DWORD dwAttrs = pMD->GetAttrs();
if (fDebug)
{
DefineFullyQualifiedNameForClass();
LPCUTF8 name = GetFullyQualifiedNameForClass(pMD->GetMethodTable());
swprintf_s(buff, cchBuff,
W(" slot %2d: %S::%S%S 0x%p (slot = %2d)\n"),
it.GetSlotNumber(),
name,
pszName,
IsMdFinal(dwAttrs) ? " (final)" : "",
pMD->GetMethodEntryPoint(),
pMD->GetSlot()
);
WszOutputDebugString(buff);
}
else
{
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS,
" slot %2d: %s::%s%s 0x%p (slot = %2d)\n",
it.GetSlotNumber(),
pMD->GetClass()->GetDebugClassName(),
pszName,
IsMdFinal(dwAttrs) ? " (final)" : "",
pMD->GetMethodEntryPoint(),
pMD->GetSlot()
));
}
if (it.GetSlotNumber() == (DWORD)(GetNumMethods()-1))
{
if (fDebug)
{
WszOutputDebugString(W(" <-- vtable ends here\n"));
}
else
{
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS, " <-- vtable ends here\n"));
}
}
}
}
EX_CATCH_HRESULT(hr);
if (fDebug)
{
WszOutputDebugString(W("\n"));
}
else
{
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
}
} // MethodTable::DebugDumpVtable
void
MethodTable::Debug_DumpInterfaceMap(
LPCSTR szInterfaceMapPrefix)
{
// Diagnostic functions shouldn't affect normal behavior
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
if (GetNumInterfaces() == 0)
{ // There are no interfaces, no point in printing interface map info
return;
}
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS,
"%s Interface Map for '%s':\n",
szInterfaceMapPrefix,
GetDebugClassName()));
LOG((LF_ALWAYS, LL_ALWAYS,
" Number of interfaces = %d\n",
GetNumInterfaces()));
HRESULT hr;
EX_TRY
{
InterfaceMapIterator it(this);
while (it.Next())
{
MethodTable *pInterfaceMT = it.GetInterface();
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS,
" index %2d: %s 0x%p\n",
it.GetIndex(),
pInterfaceMT->GetDebugClassName(),
pInterfaceMT));
}
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS, " <-- interface map ends here\n"));
}
EX_CATCH_HRESULT(hr);
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS, "\n"));
} // MethodTable::Debug_DumpInterfaceMap
void
MethodTable::Debug_DumpDispatchMap()
{
WRAPPER_NO_CONTRACT; // It's a dev helper, we don't care about contracts
if (!HasDispatchMap())
{ // There is no dispatch map for this type, no point in printing the info
return;
}
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS, "Dispatch Map for '%s':\n", GetDebugClassName()));
InterfaceInfo_t * pInterfaceMap = GetInterfaceMap();
DispatchMap::EncodedMapIterator it(this);
while (it.IsValid())
{
DispatchMapEntry *pEntry = it.Entry();
UINT32 nInterfaceIndex = pEntry->GetTypeID().GetInterfaceNum();
_ASSERTE(nInterfaceIndex < GetNumInterfaces());
MethodTable * pInterface = pInterfaceMap[nInterfaceIndex].GetMethodTable();
UINT32 nInterfaceSlotNumber = pEntry->GetSlotNumber();
UINT32 nImplementationSlotNumber = pEntry->GetTargetSlotNumber();
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS,
" Interface %d (%s) slot %d (%s) implemented in slot %d (%s)\n",
nInterfaceIndex,
pInterface->GetDebugClassName(),
nInterfaceSlotNumber,
pInterface->GetMethodDescForSlot(nInterfaceSlotNumber)->GetName(),
nImplementationSlotNumber,
GetMethodDescForSlot(nImplementationSlotNumber)->GetName()));
it.Next();
}
//LF_ALWAYS allowed here because this is controlled by special env var code:EEConfig::ShouldDumpOnClassLoad
LOG((LF_ALWAYS, LL_ALWAYS, " <-- Dispatch map ends here\n"));
} // MethodTable::Debug_DumpDispatchMap
#endif //_DEBUG
//==========================================================================================
NOINLINE BOOL MethodTable::ImplementsInterface(MethodTable *pInterface)
{
WRAPPER_NO_CONTRACT;
return ImplementsInterfaceInline(pInterface);
}
//==========================================================================================
BOOL MethodTable::ImplementsEquivalentInterface(MethodTable *pInterface)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
SO_TOLERANT;
PRECONDITION(pInterface->IsInterface()); // class we are looking up should be an interface
}
CONTRACTL_END;
// look for exact match first (optimize for success)
if (ImplementsInterfaceInline(pInterface))
return TRUE;
if (!pInterface->HasTypeEquivalence())
return FALSE;
DWORD numInterfaces = GetNumInterfaces();
if (numInterfaces == 0)
return FALSE;
InterfaceInfo_t *pInfo = GetInterfaceMap();
do
{
if (pInfo->GetMethodTable()->IsEquivalentTo(pInterface))
return TRUE;
pInfo++;
}
while (--numInterfaces);
return FALSE;
}
//==========================================================================================
MethodDesc *MethodTable::GetMethodDescForInterfaceMethod(MethodDesc *pInterfaceMD)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
PRECONDITION(!pInterfaceMD->HasClassOrMethodInstantiation());
}
CONTRACTL_END;
WRAPPER_NO_CONTRACT;
return GetMethodDescForInterfaceMethod(TypeHandle(pInterfaceMD->GetMethodTable()), pInterfaceMD);
}
//==========================================================================================
MethodDesc *MethodTable::GetMethodDescForInterfaceMethod(TypeHandle ownerType, MethodDesc *pInterfaceMD)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
PRECONDITION(!ownerType.IsNull());
PRECONDITION(ownerType.GetMethodTable()->IsInterface());
PRECONDITION(ownerType.GetMethodTable()->HasSameTypeDefAs(pInterfaceMD->GetMethodTable()));
PRECONDITION(IsArray() || ImplementsEquivalentInterface(ownerType.GetMethodTable()) || ownerType.GetMethodTable()->HasVariance());
}
CONTRACTL_END;
MethodDesc *pMD = NULL;
MethodTable *pInterfaceMT = ownerType.AsMethodTable();
#ifdef CROSSGEN_COMPILE
DispatchSlot implSlot(FindDispatchSlot(pInterfaceMT->GetTypeID(), pInterfaceMD->GetSlot()));
PCODE pTgt = implSlot.GetTarget();
#else
PCODE pTgt = VirtualCallStubManager::GetTarget(
pInterfaceMT->GetLoaderAllocator()->GetDispatchToken(pInterfaceMT->GetTypeID(), pInterfaceMD->GetSlot()),
this);
#endif
pMD = MethodTable::GetMethodDescForSlotAddress(pTgt);
#ifdef _DEBUG
MethodDesc *pDispSlotMD = FindDispatchSlotForInterfaceMD(ownerType, pInterfaceMD).GetMethodDesc();
_ASSERTE(pDispSlotMD == pMD);
#endif // _DEBUG
pMD->CheckRestore();
return pMD;
}
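// For example (illustrative, not tied to a specific type): given pInterfaceMD for an interface
// method IFoo.Bar and 'this' being the MethodTable of a class that implements IFoo, the
// dispatch-token lookup above resolves to the MethodDesc of that class's implementation of Bar,
// i.e. the slot that would be reached through interface dispatch.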
#endif // DACCESS_COMPILE
//==========================================================================================
PTR_FieldDesc MethodTable::GetFieldDescByIndex(DWORD fieldIndex)
{
LIMITED_METHOD_CONTRACT;
if (HasGenericsStaticsInfo() &&
fieldIndex >= GetNumIntroducedInstanceFields())
{
return GetGenericsStaticFieldDescs() + (fieldIndex - GetNumIntroducedInstanceFields());
}
else
{
return GetClass()->GetFieldDescList() + fieldIndex;
}
}
//==========================================================================================
DWORD MethodTable::GetIndexForFieldDesc(FieldDesc *pField)
{
LIMITED_METHOD_CONTRACT;
if (pField->IsStatic() && HasGenericsStaticsInfo())
{
FieldDesc *pStaticFields = GetGenericsStaticFieldDescs();
return GetNumIntroducedInstanceFields() + DWORD(pField - pStaticFields);
}
else
{
FieldDesc *pFields = GetClass()->GetFieldDescList();
return DWORD(pField - pFields);
}
}
//==========================================================================================
#ifdef _MSC_VER
#pragma optimize("t", on)
#endif // _MSC_VER
// compute whether the type can be considered to have had its
// static initialization run without doing anything at all, i.e. whether we know
// immediately that the type requires nothing to do for initialization
//
// If a type used as a representative during JITting is PreInit then
// any types that it may represent within a code-sharing
// group are also PreInit. For example, if List<object> is PreInit then List<string>
// and List<MyType> are also PreInit. This is because the dynamicStatics, staticRefHandles
// and hasCCtor are all identical given a head type, and weakening the domainNeutrality
// to DomainSpecific only makes more types PreInit.
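// For example (illustrative): a type with only instance fields and no class constructor is
// PreInit, so no initialization check is needed at its call sites; a type with a .cctor,
// boxed regular statics, or dynamic statics is not PreInit and requires the usual checks.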
BOOL MethodTable::IsClassPreInited()
{
LIMITED_METHOD_CONTRACT;
if (ContainsGenericVariables())
return TRUE;
if (HasClassConstructor())
return FALSE;
if (HasBoxedRegularStatics())
return FALSE;
if (IsDynamicStatics())
return FALSE;
return TRUE;
}
#ifdef _MSC_VER
#pragma optimize("", on)
#endif // _MSC_VER
//========================================================================================
#if defined(UNIX_AMD64_ABI_ITF)
#if defined(_DEBUG) && defined(LOGGING)
static
const char* GetSystemVClassificationTypeName(SystemVClassificationType t)
{
switch (t)
{
case SystemVClassificationTypeUnknown: return "Unknown";
case SystemVClassificationTypeStruct: return "Struct";
case SystemVClassificationTypeNoClass: return "NoClass";
case SystemVClassificationTypeMemory: return "Memory";
case SystemVClassificationTypeInteger: return "Integer";
case SystemVClassificationTypeIntegerReference: return "IntegerReference";
case SystemVClassificationTypeIntegerByRef: return "IntegerByReference";
case SystemVClassificationTypeSSE: return "SSE";
case SystemVClassificationTypeTypedReference: return "TypedReference";
default: return "ERROR";
}
};
#endif // _DEBUG && LOGGING
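// Worked example (illustrative, per the SysV AMD64 ABI): for
//     struct { int a; float b; double c; }   // 16 bytes
// the first eightbyte holds 'a' (Integer) and 'b' (SSE); merging Integer with SSE yields
// Integer, so eightbyte 0 is passed in a general-purpose register. The second eightbyte holds
// only 'c' (SSE) and is passed in an XMM register. A struct with a field that is not at its
// natural alignment is instead passed on the stack (see the checks below).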
// Returns 'true' if the struct is passed in registers, 'false' otherwise.
bool MethodTable::ClassifyEightBytes(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel, unsigned int startOffsetOfStruct, bool useNativeLayout)
{
if (useNativeLayout)
{
return ClassifyEightBytesWithNativeLayout(helperPtr, nestingLevel, startOffsetOfStruct, useNativeLayout);
}
else
{
return ClassifyEightBytesWithManagedLayout(helperPtr, nestingLevel, startOffsetOfStruct, useNativeLayout);
}
}
// If we have a field classification already, but there is a union, we must merge the classification type of the field. Returns the
// new, merged classification type.
/* static */
static SystemVClassificationType ReClassifyField(SystemVClassificationType originalClassification, SystemVClassificationType newFieldClassification)
{
_ASSERTE((newFieldClassification == SystemVClassificationTypeInteger) ||
(newFieldClassification == SystemVClassificationTypeIntegerReference) ||
(newFieldClassification == SystemVClassificationTypeIntegerByRef) ||
(newFieldClassification == SystemVClassificationTypeSSE));
switch (newFieldClassification)
{
case SystemVClassificationTypeInteger:
// Integer overrides everything; the resulting classification is Integer. Can't merge Integer and IntegerReference.
_ASSERTE((originalClassification == SystemVClassificationTypeInteger) ||
(originalClassification == SystemVClassificationTypeSSE));
return SystemVClassificationTypeInteger;
case SystemVClassificationTypeSSE:
// If the old and new classifications are both SSE, then the merge is SSE, otherwise it will be integer. Can't merge SSE and IntegerReference.
_ASSERTE((originalClassification == SystemVClassificationTypeInteger) ||
(originalClassification == SystemVClassificationTypeSSE));
if (originalClassification == SystemVClassificationTypeSSE)
{
return SystemVClassificationTypeSSE;
}
else
{
return SystemVClassificationTypeInteger;
}
case SystemVClassificationTypeIntegerReference:
// IntegerReference can only merge with IntegerReference.
_ASSERTE(originalClassification == SystemVClassificationTypeIntegerReference);
return SystemVClassificationTypeIntegerReference;
case SystemVClassificationTypeIntegerByRef:
// IntegerByReference can only merge with IntegerByReference.
_ASSERTE(originalClassification == SystemVClassificationTypeIntegerByRef);
return SystemVClassificationTypeIntegerByRef;
default:
_ASSERTE(false); // Unexpected type.
return SystemVClassificationTypeUnknown;
}
}
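// For example (illustrative): if two fields were seen at the same normalized offset, one
// classified SSE (from a float) and the other Integer (from an int), ReClassifyField merges
// them and the Integer classification wins for the combined field, per the rules above.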
// Returns 'true' if the struct is passed in registers, 'false' otherwise.
bool MethodTable::ClassifyEightBytesWithManagedLayout(SystemVStructRegisterPassingHelperPtr helperPtr,
unsigned int nestingLevel,
unsigned int startOffsetOfStruct,
bool useNativeLayout)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
WORD numIntroducedFields = GetNumIntroducedInstanceFields();
// It appears the VM gives a struct with no fields a size of 1.
// Don't pass such a struct in registers.
if (numIntroducedFields == 0)
{
return false;
}
// No struct register passing with explicit layout. There may be cases where explicit layout may be still
// eligible for register struct passing, but it is hard to tell the real intent. Make it simple and just
// unconditionally disable register struct passing for explicit layout.
if (GetClass()->HasExplicitFieldOffsetLayout())
{
LOG((LF_JIT, LL_EVERYTHING, "%*s**** ClassifyEightBytesWithManagedLayout: struct %s has explicit layout; will not be enregistered\n",
nestingLevel * 5, "", this->GetDebugClassName()));
return false;
}
// The SIMD Intrinsic types are meant to be handled specially and should not be passed as struct registers
if (IsIntrinsicType())
{
LPCUTF8 namespaceName;
LPCUTF8 className = GetFullyQualifiedNameInfo(&namespaceName);
if ((strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector128`1") == 0) ||
(strcmp(className, "Vector64`1") == 0))
{
assert(strcmp(namespaceName, "System.Runtime.Intrinsics") == 0);
LOG((LF_JIT, LL_EVERYTHING, "%*s**** ClassifyEightBytesWithManagedLayout: struct %s is a SIMD intrinsic type; will not be enregistered\n",
nestingLevel * 5, "", this->GetDebugClassName()));
return false;
}
}
#ifdef _DEBUG
LOG((LF_JIT, LL_EVERYTHING, "%*s**** Classify %s (%p), startOffset %d, total struct size %d\n",
nestingLevel * 5, "", this->GetDebugClassName(), this, startOffsetOfStruct, helperPtr->structSize));
int fieldNum = -1;
#endif // _DEBUG
FieldDesc *pField = GetApproxFieldDescListRaw();
FieldDesc *pFieldEnd = pField + numIntroducedFields;
// System types are loaded before others, so ByReference<T> would be loaded before Span<T> or any other type that has a
// ByReference<T> field. ByReference<T> is the first by-ref-like system type to be loaded (see
// SystemDomain::LoadBaseSystemClasses), so if the current method table is marked as by-ref-like and g_pByReferenceClass is
// null, it must be the initial load of ByReference<T>.
bool isThisByReferenceOfT = IsByRefLike() && (g_pByReferenceClass == nullptr || HasSameTypeDefAs(g_pByReferenceClass));
for (; pField < pFieldEnd; pField++)
{
#ifdef _DEBUG
++fieldNum;
#endif // _DEBUG
DWORD fieldOffset = pField->GetOffset();
unsigned normalizedFieldOffset = fieldOffset + startOffsetOfStruct;
unsigned int fieldSize = pField->GetSize();
_ASSERTE(fieldSize != (unsigned int)-1);
// The field can't span past the end of the struct.
if ((normalizedFieldOffset + fieldSize) > helperPtr->structSize)
{
_ASSERTE(false && "Invalid struct size. The size of fields and overall size don't agree");
return false;
}
CorElementType fieldType = pField->GetFieldType();
SystemVClassificationType fieldClassificationType;
if (isThisByReferenceOfT)
{
// ByReference<T> is a special type whose single IntPtr field holds a by-ref potentially interior pointer to GC
// memory, so classify its field as such
_ASSERTE(numIntroducedFields == 1);
_ASSERTE(fieldType == CorElementType::ELEMENT_TYPE_I);
fieldClassificationType = SystemVClassificationTypeIntegerByRef;
}
else
{
fieldClassificationType = CorInfoType2UnixAmd64Classification(fieldType);
}
#ifdef _DEBUG
LPCUTF8 fieldName;
pField->GetName_NoThrow(&fieldName);
#endif // _DEBUG
if (fieldClassificationType == SystemVClassificationTypeStruct)
{
TypeHandle th = pField->GetApproxFieldTypeHandleThrowing();
_ASSERTE(!th.IsNull());
MethodTable* pFieldMT = th.GetMethodTable();
bool inEmbeddedStructPrev = helperPtr->inEmbeddedStruct;
helperPtr->inEmbeddedStruct = true;
bool structRet = false;
// If classifying for marshaling/PInvoke and the aggregated struct has a native layout
// use the native classification. If not, continue using the managed layout.
if (useNativeLayout && pFieldMT->HasLayout())
{
structRet = pFieldMT->ClassifyEightBytesWithNativeLayout(helperPtr, nestingLevel + 1, normalizedFieldOffset, useNativeLayout);
}
else
{
structRet = pFieldMT->ClassifyEightBytesWithManagedLayout(helperPtr, nestingLevel + 1, normalizedFieldOffset, useNativeLayout);
}
helperPtr->inEmbeddedStruct = inEmbeddedStructPrev;
if (!structRet)
{
// If the nested struct says not to enregister, there's no need to continue analyzing at this level. Just return "do not enregister".
return false;
}
continue;
}
if (fieldClassificationType == SystemVClassificationTypeTypedReference ||
CorInfoType2UnixAmd64Classification(GetClass_NoLogging()->GetInternalCorElementType()) == SystemVClassificationTypeTypedReference)
{
// TypedReference is a very special type.
// In source/metadata it has two fields, Type and Value, both declared as IntPtr.
// When the VM creates the layout of the type, it changes the Value field's type to a ByRef type,
// while the Type field is left as IntPtr (TYPE_I internally, the native int type).
// This requires special treatment of this type. The code below handles both fields (and the entire type).
for (unsigned i = 0; i < 2; i++)
{
fieldSize = 8;
fieldOffset = (i == 0 ? 0 : 8);
normalizedFieldOffset = fieldOffset + startOffsetOfStruct;
fieldClassificationType = (i == 0 ? SystemVClassificationTypeIntegerByRef : SystemVClassificationTypeInteger);
if ((normalizedFieldOffset % fieldSize) != 0)
{
// The spec requires the fields of a struct value passed in registers to be at
// their natural alignment.
LOG((LF_JIT, LL_EVERYTHING, " %*sxxxx Field %d %s: offset %d (normalized %d), size %d not at natural alignment; not enregistering struct\n",
nestingLevel * 5, "", fieldNum, fieldNum, (i == 0 ? "Value" : "Type"), fieldOffset, normalizedFieldOffset, fieldSize));
return false;
}
helperPtr->largestFieldOffset = (int)normalizedFieldOffset;
// Set the data for a new field.
// The new field classification must not have been initialized yet.
_ASSERTE(helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField] == SystemVClassificationTypeNoClass);
// There are only a few field classifications that are allowed.
_ASSERTE((fieldClassificationType == SystemVClassificationTypeInteger) ||
(fieldClassificationType == SystemVClassificationTypeIntegerReference) ||
(fieldClassificationType == SystemVClassificationTypeIntegerByRef) ||
(fieldClassificationType == SystemVClassificationTypeSSE));
helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField] = fieldClassificationType;
helperPtr->fieldSizes[helperPtr->currentUniqueOffsetField] = fieldSize;
helperPtr->fieldOffsets[helperPtr->currentUniqueOffsetField] = normalizedFieldOffset;
LOG((LF_JIT, LL_EVERYTHING, " %*s**** Field %d %s: offset %d (normalized %d), size %d, currentUniqueOffsetField %d, field type classification %s, chosen field classification %s\n",
nestingLevel * 5, "", fieldNum, (i == 0 ? "Value" : "Type"), fieldOffset, normalizedFieldOffset, fieldSize, helperPtr->currentUniqueOffsetField,
GetSystemVClassificationTypeName(fieldClassificationType),
GetSystemVClassificationTypeName(helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField])));
helperPtr->currentUniqueOffsetField++;
#ifdef _DEBUG
++fieldNum;
#endif // _DEBUG
}
// Both fields of the special TypedReference struct are handled.
pField = pFieldEnd;
// Done classifying the System.TypedReference struct fields.
continue;
}
if ((normalizedFieldOffset % fieldSize) != 0)
{
// The spec requires the fields of a struct value passed in registers to be at
// their natural alignment.
LOG((LF_JIT, LL_EVERYTHING, " %*sxxxx Field %d %s: offset %d (normalized %d), size %d not at natural alignment; not enregistering struct\n",
nestingLevel * 5, "", fieldNum, fieldNum, fieldName, fieldOffset, normalizedFieldOffset, fieldSize));
return false;
}
if ((int)normalizedFieldOffset <= helperPtr->largestFieldOffset)
{
// Find the field corresponding to this offset and update the size if needed.
// We assume that either it matches the offset of a previously seen field, or
// it is an out-of-order offset (the VM does not always hand us fields in increasing
// offset order) that doesn't overlap any other field.
// REVIEW: will the offset ever match a previously seen field offset for cases that are NOT ExplicitLayout?
// If not, we can get rid of this loop, and just assume the offset is from an out-of-order field. We wouldn't
// need to maintain largestFieldOffset, either, since we would then assume all fields are unique. We could
// also get rid of ReClassifyField().
int i;
for (i = helperPtr->currentUniqueOffsetField - 1; i >= 0; i--)
{
if (helperPtr->fieldOffsets[i] == normalizedFieldOffset)
{
if (fieldSize > helperPtr->fieldSizes[i])
{
helperPtr->fieldSizes[i] = fieldSize;
}
helperPtr->fieldClassifications[i] = ReClassifyField(helperPtr->fieldClassifications[i], fieldClassificationType);
LOG((LF_JIT, LL_EVERYTHING, " %*sxxxx Field %d %s: offset %d (normalized %d), size %d, union with uniqueOffsetField %d, field type classification %s, reclassified field to %s\n",
nestingLevel * 5, "", fieldNum, fieldName, fieldOffset, normalizedFieldOffset, fieldSize, i,
GetSystemVClassificationTypeName(fieldClassificationType),
GetSystemVClassificationTypeName(helperPtr->fieldClassifications[i])));
break;
}
// Make sure the field doesn't start in the middle of another field.
_ASSERTE((normalizedFieldOffset < helperPtr->fieldOffsets[i]) ||
(normalizedFieldOffset >= helperPtr->fieldOffsets[i] + helperPtr->fieldSizes[i]));
}
if (i >= 0)
{
// The proper size of the union set of fields has been set above; continue to the next field.
continue;
}
}
else
{
helperPtr->largestFieldOffset = (int)normalizedFieldOffset;
}
// Set the data for a new field.
// The new field classification must not have been initialized yet.
_ASSERTE(helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField] == SystemVClassificationTypeNoClass);
// There are only a few field classifications that are allowed.
_ASSERTE((fieldClassificationType == SystemVClassificationTypeInteger) ||
(fieldClassificationType == SystemVClassificationTypeIntegerReference) ||
(fieldClassificationType == SystemVClassificationTypeIntegerByRef) ||
(fieldClassificationType == SystemVClassificationTypeSSE));
helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField] = fieldClassificationType;
helperPtr->fieldSizes[helperPtr->currentUniqueOffsetField] = fieldSize;
helperPtr->fieldOffsets[helperPtr->currentUniqueOffsetField] = normalizedFieldOffset;
LOG((LF_JIT, LL_EVERYTHING, " %*s**** Field %d %s: offset %d (normalized %d), size %d, currentUniqueOffsetField %d, field type classification %s, chosen field classification %s\n",
nestingLevel * 5, "", fieldNum, fieldName, fieldOffset, normalizedFieldOffset, fieldSize, helperPtr->currentUniqueOffsetField,
GetSystemVClassificationTypeName(fieldClassificationType),
GetSystemVClassificationTypeName(helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField])));
_ASSERTE(helperPtr->currentUniqueOffsetField < SYSTEMV_MAX_NUM_FIELDS_IN_REGISTER_PASSED_STRUCT);
helperPtr->currentUniqueOffsetField++;
} // end per-field for loop
AssignClassifiedEightByteTypes(helperPtr, nestingLevel);
return true;
}
// Returns 'true' if the struct is passed in registers, 'false' otherwise.
bool MethodTable::ClassifyEightBytesWithNativeLayout(SystemVStructRegisterPassingHelperPtr helperPtr,
unsigned int nestingLevel,
unsigned int startOffsetOfStruct,
bool useNativeLayout)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
// We should only be in this method when doing a native layout classification.
_ASSERTE(useNativeLayout);
#ifdef DACCESS_COMPILE
// No register classification for this case.
return false;
#else // DACCESS_COMPILE
if (!HasLayout())
{
// If there is no native layout for this struct use the managed layout instead.
return ClassifyEightBytesWithManagedLayout(helperPtr, nestingLevel, startOffsetOfStruct, useNativeLayout);
}
const FieldMarshaler *pFieldMarshaler = GetLayoutInfo()->GetFieldMarshalers();
UINT numIntroducedFields = GetLayoutInfo()->GetNumCTMFields();
// No fields.
if (numIntroducedFields == 0)
{
return false;
}
// No struct register passing with explicit layout. There may be cases where explicit layout may be still
// eligible for register struct passing, but it is hard to tell the real intent. Make it simple and just
// unconditionally disable register struct passing for explicit layout.
if (GetClass()->HasExplicitFieldOffsetLayout())
{
LOG((LF_JIT, LL_EVERYTHING, "%*s**** ClassifyEightBytesWithNativeLayout: struct %s has explicit layout; will not be enregistered\n",
nestingLevel * 5, "", this->GetDebugClassName()));
return false;
}
// The SIMD Intrinsic types are meant to be handled specially and should not be passed as struct registers
if (IsIntrinsicType())
{
LPCUTF8 namespaceName;
LPCUTF8 className = GetFullyQualifiedNameInfo(&namespaceName);
if ((strcmp(className, "Vector256`1") == 0) || (strcmp(className, "Vector128`1") == 0) ||
(strcmp(className, "Vector64`1") == 0))
{
assert(strcmp(namespaceName, "System.Runtime.Intrinsics") == 0);
LOG((LF_JIT, LL_EVERYTHING, "%*s**** ClassifyEightBytesWithNativeLayout: struct %s is a SIMD intrinsic type; will not be enregistered\n",
nestingLevel * 5, "", this->GetDebugClassName()));
return false;
}
}
#ifdef _DEBUG
LOG((LF_JIT, LL_EVERYTHING, "%*s**** Classify for native struct %s (%p), startOffset %d, total struct size %d\n",
nestingLevel * 5, "", this->GetDebugClassName(), this, startOffsetOfStruct, helperPtr->structSize));
int fieldNum = -1;
#endif // _DEBUG
while (numIntroducedFields--)
{
#ifdef _DEBUG
++fieldNum;
#endif // _DEBUG
FieldDesc *pField = pFieldMarshaler->GetFieldDesc();
CorElementType fieldType = pField->GetFieldType();
// Invalid field type.
if (fieldType == ELEMENT_TYPE_END)
{
return false;
}
DWORD fieldOffset = pFieldMarshaler->GetExternalOffset();
unsigned normalizedFieldOffset = fieldOffset + startOffsetOfStruct;
unsigned int fieldNativeSize = pFieldMarshaler->NativeSize();
if (fieldNativeSize > SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES)
{
// Pass on stack in this case.
return false;
}
_ASSERTE(fieldNativeSize != (unsigned int)-1);
// The field can't span past the end of the struct.
if ((normalizedFieldOffset + fieldNativeSize) > helperPtr->structSize)
{
_ASSERTE(false && "Invalid native struct size. The size of fields and overall size don't agree");
return false;
}
SystemVClassificationType fieldClassificationType = SystemVClassificationTypeUnknown;
#ifdef _DEBUG
LPCUTF8 fieldName;
pField->GetName_NoThrow(&fieldName);
#endif // _DEBUG
// Some NStruct Field Types have extra information and require special handling
NStructFieldType cls = pFieldMarshaler->GetNStructFieldType();
if (cls == NFT_FIXEDCHARARRAYANSI)
{
fieldClassificationType = SystemVClassificationTypeInteger;
}
else if (cls == NFT_FIXEDARRAY)
{
VARTYPE vtElement = ((FieldMarshaler_FixedArray*)pFieldMarshaler)->GetElementVT();
switch (vtElement)
{
case VT_EMPTY:
case VT_NULL:
case VT_BOOL:
case VT_I1:
case VT_I2:
case VT_I4:
case VT_I8:
case VT_UI1:
case VT_UI2:
case VT_UI4:
case VT_UI8:
case VT_PTR:
case VT_INT:
case VT_UINT:
case VT_LPSTR:
case VT_LPWSTR:
fieldClassificationType = SystemVClassificationTypeInteger;
break;
case VT_R4:
case VT_R8:
fieldClassificationType = SystemVClassificationTypeSSE;
break;
case VT_DECIMAL:
case VT_DATE:
case VT_BSTR:
case VT_UNKNOWN:
case VT_DISPATCH:
case VT_SAFEARRAY:
case VT_ERROR:
case VT_HRESULT:
case VT_CARRAY:
case VT_USERDEFINED:
case VT_RECORD:
case VT_FILETIME:
case VT_BLOB:
case VT_STREAM:
case VT_STORAGE:
case VT_STREAMED_OBJECT:
case VT_STORED_OBJECT:
case VT_BLOB_OBJECT:
case VT_CF:
case VT_CLSID:
default:
// Not supported.
return false;
}
}
#ifdef FEATURE_COMINTEROP
else if (cls == NFT_INTERFACE)
{
// COMInterop not supported for CORECLR.
_ASSERTE(false && "COMInterop not supported for CORECLR.");
return false;
}
#ifdef FEATURE_CLASSIC_COMINTEROP
else if (cls == NFT_SAFEARRAY)
{
// COMInterop not supported for CORECLR.
_ASSERTE(false && "COMInterop not supported for CORECLR.");
return false;
}
#endif // FEATURE_CLASSIC_COMINTEROP
#endif // FEATURE_COMINTEROP
else if (cls == NFT_NESTEDLAYOUTCLASS)
{
MethodTable* pFieldMT = ((FieldMarshaler_NestedLayoutClass*)pFieldMarshaler)->GetMethodTable();
bool inEmbeddedStructPrev = helperPtr->inEmbeddedStruct;
helperPtr->inEmbeddedStruct = true;
bool structRet = pFieldMT->ClassifyEightBytesWithNativeLayout(helperPtr, nestingLevel + 1, normalizedFieldOffset, useNativeLayout);
helperPtr->inEmbeddedStruct = inEmbeddedStructPrev;
if (!structRet)
{
// If the nested struct says not to enregister, there's no need to continue analyzing at this level. Just return "do not enregister".
return false;
}
continue;
}
else if (cls == NFT_NESTEDVALUECLASS)
{
MethodTable* pFieldMT = ((FieldMarshaler_NestedValueClass*)pFieldMarshaler)->GetMethodTable();
bool inEmbeddedStructPrev = helperPtr->inEmbeddedStruct;
helperPtr->inEmbeddedStruct = true;
bool structRet = pFieldMT->ClassifyEightBytesWithNativeLayout(helperPtr, nestingLevel + 1, normalizedFieldOffset, useNativeLayout);
helperPtr->inEmbeddedStruct = inEmbeddedStructPrev;
if (!structRet)
{
// If the nested struct says not to enregister, there's no need to continue analyzing at this level. Just return "do not enregister".
return false;
}
continue;
}
else if (cls == NFT_COPY1)
{
// The following CorElementTypes are the only ones handled with FieldMarshaler_Copy1.
switch (fieldType)
{
case ELEMENT_TYPE_I1:
fieldClassificationType = SystemVClassificationTypeInteger;
break;
case ELEMENT_TYPE_U1:
fieldClassificationType = SystemVClassificationTypeInteger;
break;
default:
// Invalid entry.
return false; // Pass on stack.
}
}
else if (cls == NFT_COPY2)
{
// The following CorElementTypes are the only ones handled with FieldMarshaler_Copy2.
switch (fieldType)
{
case ELEMENT_TYPE_CHAR:
case ELEMENT_TYPE_I2:
case ELEMENT_TYPE_U2:
fieldClassificationType = SystemVClassificationTypeInteger;
break;
default:
// Invalid entry.
return false; // Pass on stack.
}
}
else if (cls == NFT_COPY4)
{
// The following CorElementTypes are the only ones handled with FieldMarshaler_Copy4.
switch (fieldType)
{
// At this point, ELEMENT_TYPE_I must be 4 bytes long. Same for ELEMENT_TYPE_U.
case ELEMENT_TYPE_I:
case ELEMENT_TYPE_I4:
case ELEMENT_TYPE_U:
case ELEMENT_TYPE_U4:
case ELEMENT_TYPE_PTR:
fieldClassificationType = SystemVClassificationTypeInteger;
break;
case ELEMENT_TYPE_R4:
fieldClassificationType = SystemVClassificationTypeSSE;
break;
default:
// Invalid entry.
return false; // Pass on stack.
}
}
else if (cls == NFT_COPY8)
{
// The following CorElementTypes are the only ones handled with FieldMarshaler_Copy8.
switch (fieldType)
{
// At this point, ELEMENT_TYPE_I must be 8 bytes long. Same for ELEMENT_TYPE_U.
case ELEMENT_TYPE_I:
case ELEMENT_TYPE_I8:
case ELEMENT_TYPE_U:
case ELEMENT_TYPE_U8:
case ELEMENT_TYPE_PTR:
fieldClassificationType = SystemVClassificationTypeInteger;
break;
case ELEMENT_TYPE_R8:
fieldClassificationType = SystemVClassificationTypeSSE;
break;
default:
// Invalid entry.
return false; // Pass on stack.
}
}
else if (cls == NFT_FIXEDSTRINGUNI)
{
fieldClassificationType = SystemVClassificationTypeInteger;
}
else if (cls == NFT_FIXEDSTRINGANSI)
{
fieldClassificationType = SystemVClassificationTypeInteger;
}
else
{
// All other NStruct Field Types which do not require special handling.
switch (cls)
{
#ifdef FEATURE_COMINTEROP
case NFT_BSTR:
case NFT_HSTRING:
case NFT_VARIANT:
case NFT_VARIANTBOOL:
case NFT_CURRENCY:
// COMInterop not supported for CORECLR.
_ASSERTE(false && "COMInterop not supported for CORECLR.");
return false;
#endif // FEATURE_COMINTEROP
case NFT_STRINGUNI:
case NFT_STRINGANSI:
case NFT_ANSICHAR:
case NFT_STRINGUTF8:
case NFT_WINBOOL:
case NFT_CBOOL:
case NFT_DELEGATE:
case NFT_SAFEHANDLE:
case NFT_CRITICALHANDLE:
fieldClassificationType = SystemVClassificationTypeInteger;
break;
// It's not clear what the right behavior for NFT_DECIMAL and NFT_DATE is,
// but those two types would only make sense on Windows. We can revisit this later.
case NFT_DECIMAL:
case NFT_DATE:
case NFT_ILLEGAL:
default:
return false;
}
}
if ((normalizedFieldOffset % fieldNativeSize) != 0)
{
// The spec requires the fields of a struct value passed in registers to be at
// their natural alignment.
LOG((LF_JIT, LL_EVERYTHING, " %*sxxxx Native Field %d %s: offset %d (normalized %d), native size %d not at natural alignment; not enregistering struct\n",
nestingLevel * 5, "", fieldNum, fieldNum, fieldName, fieldOffset, normalizedFieldOffset, fieldNativeSize));
return false;
}
if ((int)normalizedFieldOffset <= helperPtr->largestFieldOffset)
{
// Find the field corresponding to this offset and update the size if needed.
// We assume that either it matches the offset of a previously seen field, or
// it is an out-of-order offset (the VM does not always hand us fields in increasing
// offset order) that doesn't overlap any other field.
int i;
for (i = helperPtr->currentUniqueOffsetField - 1; i >= 0; i--)
{
if (helperPtr->fieldOffsets[i] == normalizedFieldOffset)
{
if (fieldNativeSize > helperPtr->fieldSizes[i])
{
helperPtr->fieldSizes[i] = fieldNativeSize;
}
helperPtr->fieldClassifications[i] = ReClassifyField(helperPtr->fieldClassifications[i], fieldClassificationType);
LOG((LF_JIT, LL_EVERYTHING, " %*sxxxx Native Field %d %s: offset %d (normalized %d), native size %d, union with uniqueOffsetField %d, field type classification %s, reclassified field to %s\n",
nestingLevel * 5, "", fieldNum, fieldName, fieldOffset, normalizedFieldOffset, fieldNativeSize, i,
GetSystemVClassificationTypeName(fieldClassificationType),
GetSystemVClassificationTypeName(helperPtr->fieldClassifications[i])));
break;
}
// Make sure the field doesn't start in the middle of another field.
_ASSERTE((normalizedFieldOffset < helperPtr->fieldOffsets[i]) ||
(normalizedFieldOffset >= helperPtr->fieldOffsets[i] + helperPtr->fieldSizes[i]));
}
if (i >= 0)
{
// The proper size of the union set of fields has been set above; continue to the next field.
continue;
}
}
else
{
helperPtr->largestFieldOffset = (int)normalizedFieldOffset;
}
// Set the data for a new field.
// The new field classification must not have been initialized yet.
_ASSERTE(helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField] == SystemVClassificationTypeNoClass);
// There are only a few field classifications that are allowed.
_ASSERTE((fieldClassificationType == SystemVClassificationTypeInteger) ||
(fieldClassificationType == SystemVClassificationTypeIntegerReference) ||
(fieldClassificationType == SystemVClassificationTypeIntegerByRef) ||
(fieldClassificationType == SystemVClassificationTypeSSE));
helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField] = fieldClassificationType;
helperPtr->fieldSizes[helperPtr->currentUniqueOffsetField] = fieldNativeSize;
helperPtr->fieldOffsets[helperPtr->currentUniqueOffsetField] = normalizedFieldOffset;
LOG((LF_JIT, LL_EVERYTHING, " %*s**** Native Field %d %s: offset %d (normalized %d), size %d, currentUniqueOffsetField %d, field type classification %s, chosen field classification %s\n",
nestingLevel * 5, "", fieldNum, fieldName, fieldOffset, normalizedFieldOffset, fieldNativeSize, helperPtr->currentUniqueOffsetField,
GetSystemVClassificationTypeName(fieldClassificationType),
GetSystemVClassificationTypeName(helperPtr->fieldClassifications[helperPtr->currentUniqueOffsetField])));
_ASSERTE(helperPtr->currentUniqueOffsetField < SYSTEMV_MAX_NUM_FIELDS_IN_REGISTER_PASSED_STRUCT);
helperPtr->currentUniqueOffsetField++;
((BYTE*&)pFieldMarshaler) += MAXFIELDMARSHALERSIZE;
} // end per-field for loop
AssignClassifiedEightByteTypes(helperPtr, nestingLevel);
return true;
#endif // DACCESS_COMPILE
}
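// Padding example (illustrative): for struct { char c; double d; } (16 bytes), the seven
// padding bytes between 'c' and 'd' lie before the end of the last field and are classified
// NoClass, so eightbyte 0 stays Integer (from 'c') and eightbyte 1 is SSE (from 'd'). Any
// tail padding after the last field would instead inherit the last field's classification,
// as computed in AssignClassifiedEightByteTypes below.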
// Assigns the classification types to the array with eightbyte types.
void MethodTable::AssignClassifiedEightByteTypes(SystemVStructRegisterPassingHelperPtr helperPtr, unsigned int nestingLevel) const
{
static const size_t CLR_SYSTEMV_MAX_BYTES_TO_PASS_IN_REGISTERS = CLR_SYSTEMV_MAX_EIGHTBYTES_COUNT_TO_PASS_IN_REGISTERS * SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES;
static_assert_no_msg(CLR_SYSTEMV_MAX_BYTES_TO_PASS_IN_REGISTERS == SYSTEMV_MAX_NUM_FIELDS_IN_REGISTER_PASSED_STRUCT);
if (!helperPtr->inEmbeddedStruct)
{
_ASSERTE(nestingLevel == 0);
int largestFieldOffset = helperPtr->largestFieldOffset;
_ASSERTE(largestFieldOffset != -1);
// We're at the top level of the recursion, and we're done looking at the fields.
// Now sort the fields by offset and set the output data.
int sortedFieldOrder[CLR_SYSTEMV_MAX_BYTES_TO_PASS_IN_REGISTERS];
for (unsigned i = 0; i < CLR_SYSTEMV_MAX_BYTES_TO_PASS_IN_REGISTERS; i++)
{
sortedFieldOrder[i] = -1;
}
unsigned numFields = helperPtr->currentUniqueOffsetField;
for (unsigned i = 0; i < numFields; i++)
{
_ASSERTE(helperPtr->fieldOffsets[i] < CLR_SYSTEMV_MAX_BYTES_TO_PASS_IN_REGISTERS);
_ASSERTE(sortedFieldOrder[helperPtr->fieldOffsets[i]] == -1); // we haven't seen this field offset yet.
sortedFieldOrder[helperPtr->fieldOffsets[i]] = i;
}
// Calculate the eightbytes and their types.
unsigned int accumulatedSizeForEightByte = 0;
unsigned int currentEightByteOffset = 0;
unsigned int currentEightByte = 0;
int lastFieldOrdinal = sortedFieldOrder[largestFieldOffset];
unsigned int offsetAfterLastFieldByte = largestFieldOffset + helperPtr->fieldSizes[lastFieldOrdinal];
SystemVClassificationType lastFieldClassification = helperPtr->fieldClassifications[lastFieldOrdinal];
unsigned offset = 0;
for (unsigned fieldSize = 0; offset < helperPtr->structSize; offset += fieldSize)
{
SystemVClassificationType fieldClassificationType;
int ordinal = sortedFieldOrder[offset];
if (ordinal == -1)
{
// If there is no field that starts at this offset, treat its contents as padding.
// Any padding that follows the last field receives the same classification as the
// last field; padding between fields receives the NO_CLASS classification as per
// the SysV ABI spec.
fieldSize = 1;
fieldClassificationType = offset < offsetAfterLastFieldByte ? SystemVClassificationTypeNoClass : lastFieldClassification;
}
else
{
fieldSize = helperPtr->fieldSizes[ordinal];
_ASSERTE(fieldSize > 0 && fieldSize <= SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES);
fieldClassificationType = helperPtr->fieldClassifications[ordinal];
_ASSERTE(fieldClassificationType != SystemVClassificationTypeMemory && fieldClassificationType != SystemVClassificationTypeUnknown);
}
if (helperPtr->eightByteClassifications[currentEightByte] == fieldClassificationType)
{
// Do nothing. The eight-byte already has this classification.
}
else if (helperPtr->eightByteClassifications[currentEightByte] == SystemVClassificationTypeNoClass)
{
helperPtr->eightByteClassifications[currentEightByte] = fieldClassificationType;
}
else if ((helperPtr->eightByteClassifications[currentEightByte] == SystemVClassificationTypeInteger) ||
(fieldClassificationType == SystemVClassificationTypeInteger))
{
_ASSERTE((fieldClassificationType != SystemVClassificationTypeIntegerReference) &&
(fieldClassificationType != SystemVClassificationTypeIntegerByRef));
helperPtr->eightByteClassifications[currentEightByte] = SystemVClassificationTypeInteger;
}
else if ((helperPtr->eightByteClassifications[currentEightByte] == SystemVClassificationTypeIntegerReference) ||
(fieldClassificationType == SystemVClassificationTypeIntegerReference))
{
helperPtr->eightByteClassifications[currentEightByte] = SystemVClassificationTypeIntegerReference;
}
else if ((helperPtr->eightByteClassifications[currentEightByte] == SystemVClassificationTypeIntegerByRef) ||
(fieldClassificationType == SystemVClassificationTypeIntegerByRef))
{
helperPtr->eightByteClassifications[currentEightByte] = SystemVClassificationTypeIntegerByRef;
}
else
{
helperPtr->eightByteClassifications[currentEightByte] = SystemVClassificationTypeSSE;
}
accumulatedSizeForEightByte += fieldSize;
if (accumulatedSizeForEightByte == SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES)
{
// Save data for this eightbyte.
helperPtr->eightByteSizes[currentEightByte] = SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES;
helperPtr->eightByteOffsets[currentEightByte] = currentEightByteOffset;
// Set up for next eightbyte.
currentEightByte++;
_ASSERTE(currentEightByte <= CLR_SYSTEMV_MAX_EIGHTBYTES_COUNT_TO_PASS_IN_REGISTERS);
currentEightByteOffset = offset + fieldSize;
accumulatedSizeForEightByte = 0;
}
_ASSERTE(accumulatedSizeForEightByte < SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES);
}
// Handle structs that end in the middle of an eightbyte.
if (accumulatedSizeForEightByte > 0 && accumulatedSizeForEightByte < SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES)
{
_ASSERTE((helperPtr->structSize % SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES) != 0);
helperPtr->eightByteSizes[currentEightByte] = accumulatedSizeForEightByte;
helperPtr->eightByteOffsets[currentEightByte] = currentEightByteOffset;
currentEightByte++;
}
helperPtr->eightByteCount = currentEightByte;
_ASSERTE(helperPtr->eightByteCount <= CLR_SYSTEMV_MAX_EIGHTBYTES_COUNT_TO_PASS_IN_REGISTERS);
#ifdef _DEBUG
LOG((LF_JIT, LL_EVERYTHING, " ----\n"));
LOG((LF_JIT, LL_EVERYTHING, " **** Number EightBytes: %d\n", helperPtr->eightByteCount));
for (unsigned i = 0; i < helperPtr->eightByteCount; i++)
{
LOG((LF_JIT, LL_EVERYTHING, " **** eightByte %d -- classType: %s, eightByteOffset: %d, eightByteSize: %d\n",
i, GetSystemVClassificationTypeName(helperPtr->eightByteClassifications[i]), helperPtr->eightByteOffsets[i], helperPtr->eightByteSizes[i]));
}
#endif // _DEBUG
}
}
#endif // defined(UNIX_AMD64_ABI_ITF)
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
//==========================================================================================
void MethodTable::AllocateRegularStaticBoxes()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
PRECONDITION(!ContainsGenericVariables());
PRECONDITION(HasBoxedRegularStatics());
MODE_ANY;
}
CONTRACTL_END;
LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Instantiating static handles for %s\n", GetDebugClassName()));
GCX_COOP();
PTR_BYTE pStaticBase = GetGCStaticsBasePointer();
GCPROTECT_BEGININTERIOR(pStaticBase);
// In the NGen'ed case, we have a cached array with the boxed statics' MethodTables. In the JITed case, we have just the FieldDescs.
ClassCtorInfoEntry *pClassCtorInfoEntry = GetClassCtorInfoIfExists();
if (pClassCtorInfoEntry != NULL)
{
OBJECTREF* pStaticSlots = (OBJECTREF*)(pStaticBase + pClassCtorInfoEntry->firstBoxedStaticOffset);
GCPROTECT_BEGININTERIOR(pStaticSlots);
ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ppMTs = GetLoaderModule()->GetZapModuleCtorInfo()->
GetGCStaticMTs(pClassCtorInfoEntry->firstBoxedStaticMTIndex);
DWORD numBoxedStatics = pClassCtorInfoEntry->numBoxedStatics;
for (DWORD i = 0; i < numBoxedStatics; i++)
{
#ifdef FEATURE_PREJIT
Module::RestoreMethodTablePointer(&(ppMTs[i]), GetLoaderModule());
#endif
MethodTable *pFieldMT = ppMTs[i].GetValue();
_ASSERTE(pFieldMT);
LOG((LF_CLASSLOADER, LL_INFO10000, "\tInstantiating static of type %s\n", pFieldMT->GetDebugClassName()));
OBJECTREF obj = AllocateStaticBox(pFieldMT, pClassCtorInfoEntry->hasFixedAddressVTStatics);
SetObjectReference( &(pStaticSlots[i]), obj, GetAppDomain() );
}
GCPROTECT_END();
}
else
{
// We should never take this codepath in zapped images.
_ASSERTE(!IsZapped());
FieldDesc *pField = HasGenericsStaticsInfo() ?
GetGenericsStaticFieldDescs() : (GetApproxFieldDescListRaw() + GetNumIntroducedInstanceFields());
FieldDesc *pFieldEnd = pField + GetNumStaticFields();
while (pField < pFieldEnd)
{
_ASSERTE(pField->IsStatic());
if (!pField->IsSpecialStatic() && pField->IsByValue())
{
TypeHandle th = pField->GetFieldTypeHandleThrowing();
MethodTable* pFieldMT = th.GetMethodTable();
LOG((LF_CLASSLOADER, LL_INFO10000, "\tInstantiating static of type %s\n", pFieldMT->GetDebugClassName()));
OBJECTREF obj = AllocateStaticBox(pFieldMT, HasFixedAddressVTStatics());
SetObjectReference( (OBJECTREF*)(pStaticBase + pField->GetOffset()), obj, GetAppDomain() );
}
pField++;
}
}
GCPROTECT_END();
}
//==========================================================================================
OBJECTREF MethodTable::AllocateStaticBox(MethodTable* pFieldMT, BOOL fPinned, OBJECTHANDLE* pHandle)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(pFieldMT->IsValueType());
// Activate any dependent modules if necessary
pFieldMT->EnsureInstanceActive();
OBJECTREF obj = AllocateObject(pFieldMT);
// Pin the object if necessary
if (fPinned)
{
LOG((LF_CLASSLOADER, LL_INFO10000, "\tSTATICS:Pinning static (VT fixed address attribute) of type %s\n", pFieldMT->GetDebugClassName()));
OBJECTHANDLE oh = GetAppDomain()->CreatePinningHandle(obj);
if (pHandle)
{
*pHandle = oh;
}
}
else
{
if (pHandle)
{
*pHandle = NULL;
}
}
return obj;
}
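// For example (illustrative): a static value-type field on a type marked with
// System.Runtime.CompilerServices.FixedAddressValueTypeAttribute has its box allocated with
// fPinned == TRUE, so the pinning handle above keeps the box from moving and the field's
// address stays stable across GCs.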
//==========================================================================================
BOOL MethodTable::RunClassInitEx(OBJECTREF *pThrowable)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(IsFullyLoaded());
PRECONDITION(IsProtectedByGCFrame(pThrowable));
}
CONTRACTL_END;
// A somewhat unusual function: it can both return a throwable and throw.
// The difference is that we throw on restartable operations and just return the throwable
// on exceptions that are fatal for the .cctor
// (of course, in the latter case the caller is supposed to throw pThrowable).
// Doing the opposite (i.e. throwing on fatal and returning on nonfatal)
// would be more intuitive, but it's more convenient the way it is.
BOOL fRet = FALSE;
// During the <clinit>, this thread must not be asynchronously
// stopped or interrupted. That would leave the class unavailable
// and is therefore a security hole. We don't have to worry about
// multithreading, since we only manipulate the current thread's count.
ThreadPreventAsyncHolder preventAsync;
// If the static initializer throws an exception that it doesn't catch, it has failed
EX_TRY
{
// Activate our module if necessary
EnsureInstanceActive();
STRESS_LOG1(LF_CLASSLOADER, LL_INFO1000, "RunClassInit: Calling class constructor for type %pT\n", this);
MethodTable * pCanonMT = GetCanonicalMethodTable();
// Call the code method without touching MethodDesc if possible
PCODE pCctorCode = pCanonMT->GetSlot(pCanonMT->GetClassConstructorSlot());
if (pCanonMT->IsSharedByGenericInstantiations())
{
PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCctorCode);
DECLARE_ARGHOLDER_ARRAY(args, 1);
args[ARGNUM_0] = PTR_TO_ARGHOLDER(this);
CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
CALL_MANAGED_METHOD_NORET(args);
}
else
{
PREPARE_NONVIRTUAL_CALLSITE_USING_CODE(pCctorCode);
DECLARE_ARGHOLDER_ARRAY(args, 0);
CATCH_HANDLER_FOUND_NOTIFICATION_CALLSITE;
CALL_MANAGED_METHOD_NORET(args);
}
STRESS_LOG1(LF_CLASSLOADER, LL_INFO100000, "RunClassInit: Returned successfully from class constructor for type %pT\n", this);
fRet = TRUE;
}
EX_CATCH
{
// Exception set by parent
// <TODO>@TODO: We should make this an ExceptionInInitializerError if the exception thrown is not
// a subclass of Error</TODO>
*pThrowable = GET_THROWABLE();
_ASSERTE(fRet == FALSE);
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
// If active thread state does not have a CorruptionSeverity set for the exception,
// then set one up based upon the current exception code and/or the throwable.
//
// When can we be here with the current exception tracker's corruption severity not set?
// In case of an SO in managed code: an SO is never seen by the CLR's exception handler for managed code,
// and if this happens in a .cctor, we can end up here without the corruption severity set.
Thread *pThread = GetThread();
_ASSERTE(pThread != NULL);
ThreadExceptionState *pCurTES = pThread->GetExceptionState();
_ASSERTE(pCurTES != NULL);
if (pCurTES->GetLastActiveExceptionCorruptionSeverity() == NotSet)
{
if (CEHelper::IsProcessCorruptedStateException(GetCurrentExceptionCode()) ||
CEHelper::IsProcessCorruptedStateException(*pThrowable))
{
// Process Corrupting
pCurTES->SetLastActiveExceptionCorruptionSeverity(ProcessCorrupting);
LOG((LF_EH, LL_INFO100, "MethodTable::RunClassInitEx - Exception treated as ProcessCorrupting.\n"));
}
else
{
// Not Corrupting
pCurTES->SetLastActiveExceptionCorruptionSeverity(NotCorrupting);
LOG((LF_EH, LL_INFO100, "MethodTable::RunClassInitEx - Exception treated as non-corrupting.\n"));
}
}
else
{
LOG((LF_EH, LL_INFO100, "MethodTable::RunClassInitEx - Exception already has corruption severity set.\n"));
}
#endif // FEATURE_CORRUPTING_EXCEPTIONS
}
EX_END_CATCH(SwallowAllExceptions)
return fRet;
}
//==========================================================================================
void MethodTable::DoRunClassInitThrowing()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
SO_TOLERANT;
}
CONTRACTL_END;
GCX_COOP();
// This is a fairly aggressive policy: merely asking that the class be initialized is grounds for kicking you out.
// Alternatively, we could simply NOP out the class initialization. Since the aggressive policy is also the more secure
// policy, keep this unless it proves intractable to remove all premature classinits in the system.
EnsureActive();
Thread *pThread;
pThread = GetThread();
_ASSERTE(pThread);
INTERIOR_STACK_PROBE_FOR(pThread, 8);
AppDomain *pDomain = GetAppDomain();
HRESULT hrResult = E_FAIL;
const char *description;
STRESS_LOG2(LF_CLASSLOADER, LL_INFO100000, "DoRunClassInit: Request to init %pT in appdomain %p\n", this, pDomain);
//
// Take the global lock
//
ListLock *_pLock = pDomain->GetClassInitLock();
ListLockHolder pInitLock(_pLock);
// Check again
if (IsClassInited())
goto Exit;
//
// Handle cases where the .cctor has already tried to run but failed.
//
if (IsInitError())
{
// Some error occurred trying to init this class
ListLockEntry* pEntry= (ListLockEntry *) _pLock->Find(this);
_ASSERTE(pEntry!=NULL);
_ASSERTE(pEntry->m_pLoaderAllocator == (GetDomain()->IsSharedDomain() ? pDomain->GetLoaderAllocator() : GetLoaderAllocator()));
// If this isn't a TypeInitializationException, then its creation failed
// somehow previously, so we should make one last attempt to create it. If
// that fails, just throw the exception that was originally thrown.
// Primarily, this deals with the problem that the exception is a
// ThreadAbortException, because this must be executing on a different
// thread. If in fact this thread is also aborting, then rethrowing the
// other thread's exception will not do any worse.
// If we need to create the type init exception object, we'll need to
// GC protect these, so might as well create the structure now.
struct _gc {
OBJECTREF pInitException;
OBJECTREF pNewInitException;
OBJECTREF pThrowable;
} gc;
gc.pInitException = pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException);
gc.pNewInitException = NULL;
gc.pThrowable = NULL;
GCPROTECT_BEGIN(gc);
// We need to release this lock because CreateTypeInitializationExceptionObject and fetching the TypeLoad exception can cause
// managed code to re-enter into this codepath, causing a locking order violation.
pInitLock.Release();
if (MscorlibBinder::GetException(kTypeInitializationException) != gc.pInitException->GetMethodTable())
{
DefineFullyQualifiedNameForClassWOnStack();
LPCWSTR wszName = GetFullyQualifiedNameForClassW(this);
CreateTypeInitializationExceptionObject(wszName, &gc.pInitException, &gc.pNewInitException, &gc.pThrowable);
LOADERHANDLE hOrigInitException = pEntry->m_hInitException;
if (!CLRException::IsPreallocatedExceptionObject(pEntry->m_pLoaderAllocator->GetHandleValue(hOrigInitException)))
{
// Now put the new init exception in the handle. If another thread beat us (because we released the
// lock above), then we'll just let the extra init exception object get collected later.
pEntry->m_pLoaderAllocator->CompareExchangeValueInHandle(pEntry->m_hInitException, gc.pNewInitException, gc.pInitException);
} else {
// if the stored exception is a preallocated one we cannot store the new Exception object in it.
// we'll attempt to create a new handle for the new TypeInitializationException object
LOADERHANDLE hNewInitException = NULL;
// CreateHandle can throw due to OOM. We need to catch this so that we make sure to set the
// init error. Whatever exception was thrown will be rethrown below, so no worries.
EX_TRY {
hNewInitException = pEntry->m_pLoaderAllocator->AllocateHandle(gc.pNewInitException);
} EX_CATCH {
// If we failed to create the handle we'll just leave the originally alloc'd one in place.
} EX_END_CATCH(SwallowAllExceptions);
// if two threads are racing to set m_hInitException, clear the handle created by the loser
if (hNewInitException != NULL &&
InterlockedCompareExchangeT((&pEntry->m_hInitException), hNewInitException, hOrigInitException) != hOrigInitException)
{
pEntry->m_pLoaderAllocator->ClearHandle(hNewInitException);
}
}
}
else {
gc.pThrowable = gc.pInitException;
}
GCPROTECT_END();
// Throw the saved exception. Since we may be rethrowing a previously cached exception, must clear the stack trace first.
// Rethrowing a previously cached exception is distasteful but is required for appcompat with Everett.
//
// (The IsException() is probably more appropriate as an assert but as this isn't a heavily tested code path,
// I prefer to be defensive here.)
if (IsException(gc.pThrowable->GetMethodTable()))
{
((EXCEPTIONREF)(gc.pThrowable))->ClearStackTraceForThrow();
}
// <FEATURE_CORRUPTING_EXCEPTIONS>
// Specify the corruption severity to be used to raise this exception in COMPlusThrow below.
// This will ensure that when the exception is seen by the managed code personality routine,
// it will setup the correct corruption severity in the exception tracker.
// </FEATURE_CORRUPTING_EXCEPTIONS>
COMPlusThrow(gc.pThrowable
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, pEntry->m_CorruptionSeverity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
);
}
description = ".cctor lock";
#ifdef _DEBUG
description = GetDebugClassName();
#endif
// Take the lock
{
//nontrivial holder, might take a lock in destructor
ListLockEntryHolder pEntry(ListLockEntry::Find(pInitLock, this, description));
ListLockEntryLockHolder pLock(pEntry, FALSE);
// We have a list entry, we can release the global lock now
pInitLock.Release();
if (pLock.DeadlockAwareAcquire())
{
if (pEntry->m_hrResultCode == S_FALSE)
{
if (!NingenEnabled())
{
if (HasBoxedRegularStatics())
{
// First, instantiate any objects needed for value type statics
AllocateRegularStaticBoxes();
}
// Nobody has run the .cctor yet
if (HasClassConstructor())
{
struct _gc {
OBJECTREF pInnerException;
OBJECTREF pInitException;
OBJECTREF pThrowable;
} gc;
gc.pInnerException = NULL;
gc.pInitException = NULL;
gc.pThrowable = NULL;
GCPROTECT_BEGIN(gc);
if (!RunClassInitEx(&gc.pInnerException))
{
// The .cctor failed and we want to store the exception that resulted
// in the entry. Increment the ref count to keep the entry alive for
// subsequent attempts to run the .cctor.
pEntry->AddRef();
// For collectible types, register the entry for cleanup.
if (GetLoaderAllocator()->IsCollectible())
{
GetLoaderAllocator()->RegisterFailedTypeInitForCleanup(pEntry);
}
_ASSERTE(g_pThreadAbortExceptionClass == MscorlibBinder::GetException(kThreadAbortException));
if(gc.pInnerException->GetMethodTable() == g_pThreadAbortExceptionClass)
{
gc.pThrowable = gc.pInnerException;
gc.pInitException = gc.pInnerException;
gc.pInnerException = NULL;
}
else
{
DefineFullyQualifiedNameForClassWOnStack();
LPCWSTR wszName = GetFullyQualifiedNameForClassW(this);
// Note that this may not succeed due to problems creating the exception
// object. On failure, it will first try to
CreateTypeInitializationExceptionObject(
wszName, &gc.pInnerException, &gc.pInitException, &gc.pThrowable);
}
pEntry->m_pLoaderAllocator = GetDomain()->IsSharedDomain() ? pDomain->GetLoaderAllocator() : GetLoaderAllocator();
// CreateHandle can throw due to OOM. We need to catch this so that we make sure to set the
// init error. Whatever exception was thrown will be rethrown below, so no worries.
EX_TRY {
// Save the exception object, and return to caller as well.
pEntry->m_hInitException = pEntry->m_pLoaderAllocator->AllocateHandle(gc.pInitException);
} EX_CATCH {
// If we failed to create the handle (due to OOM), we'll just store the preallocated OOM
// handle here instead.
pEntry->m_hInitException = pEntry->m_pLoaderAllocator->AllocateHandle(CLRException::GetPreallocatedOutOfMemoryException());
} EX_END_CATCH(SwallowAllExceptions);
pEntry->m_hrResultCode = E_FAIL;
SetClassInitError();
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
// Save the corruption severity of the exception so that if the type system
// attempts to pick it up from its cache list and throw again, it should
// treat the exception as corrupting, if applicable.
pEntry->m_CorruptionSeverity = pThread->GetExceptionState()->GetLastActiveExceptionCorruptionSeverity();
// We should be having a valid corruption severity at this point
_ASSERTE(pEntry->m_CorruptionSeverity != NotSet);
#endif // FEATURE_CORRUPTING_EXCEPTIONS
COMPlusThrow(gc.pThrowable
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
, pEntry->m_CorruptionSeverity
#endif // FEATURE_CORRUPTING_EXCEPTIONS
);
}
GCPROTECT_END();
}
}
pEntry->m_hrResultCode = S_OK;
// Set the initialization flags in the DLS and on domain-specific types.
// Note we also set the flag for dynamic statics, which use the DynamicStatics part
// of the DLS irrespective of whether the type is domain neutral or not.
SetClassInited();
}
else
{
// Use previous result
hrResult = pEntry->m_hrResultCode;
if(FAILED(hrResult))
{
// An exception may have occurred in the cctor. DoRunClassInit() should return FALSE in that
// case.
_ASSERTE(pEntry->m_hInitException);
_ASSERTE(pEntry->m_pLoaderAllocator == (GetDomain()->IsSharedDomain() ? pDomain->GetLoaderAllocator() : GetLoaderAllocator()));
_ASSERTE(IsInitError());
// Throw the saved exception. Since we are rethrowing a previously cached exception, must clear the stack trace first.
// Rethrowing a previously cached exception is distasteful but is required for appcompat with Everett.
//
// (The IsException() is probably more appropriate as an assert but as this isn't a heavily tested code path,
// I prefer to be defensive here.)
if (IsException(pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException)->GetMethodTable()))
{
((EXCEPTIONREF)(pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException)))->ClearStackTraceForThrow();
}
COMPlusThrow(pEntry->m_pLoaderAllocator->GetHandleValue(pEntry->m_hInitException));
}
}
}
}
//
// Notify any entries waiting on the current entry and wait for the required entries.
//
// We need to take the global lock before we play with the list of entries.
STRESS_LOG2(LF_CLASSLOADER, LL_INFO100000, "DoRunClassInit: returning SUCCESS for init %pT in appdomain %p\n", this, pDomain);
// No need to set pThrowable in case of error; it will already have been set.
g_IBCLogger.LogMethodTableAccess(this);
Exit:
;
END_INTERIOR_STACK_PROBE;
}
//==========================================================================================
void MethodTable::CheckRunClassInitThrowing()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
SO_TOLERANT;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(IsFullyLoaded());
}
CONTRACTL_END;
{ // Debug-only code causes SO violation, so add exception.
CONTRACT_VIOLATION(SOToleranceViolation);
CONSISTENCY_CHECK(CheckActivated());
}
// To find GC hole easier...
TRIGGERSGC();
if (IsClassPreInited())
return;
// Don't initialize shared generic instantiations (e.g. MyClass<__Canon>)
if (IsSharedByGenericInstantiations())
return;
DomainLocalModule *pLocalModule = GetDomainLocalModule();
_ASSERTE(pLocalModule);
DWORD iClassIndex = GetClassIndex();
// Check to see if we have already run the .cctor for this class.
if (!pLocalModule->IsClassAllocated(this, iClassIndex))
pLocalModule->PopulateClass(this);
if (!pLocalModule->IsClassInitialized(this, iClassIndex))
DoRunClassInitThrowing();
}
//==========================================================================================
void MethodTable::CheckRunClassInitAsIfConstructingThrowing()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
if (HasPreciseInitCctors())
{
MethodTable *pMTCur = this;
while (pMTCur != NULL)
{
if (!pMTCur->GetClass()->IsBeforeFieldInit())
pMTCur->CheckRunClassInitThrowing();
pMTCur = pMTCur->GetParentMethodTable();
}
}
}
//==========================================================================================
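// Allocate a new, zero-initialized instance of this type. Any class constructors with precise
// initialization semantics are run first, as if an instance were being constructed in managed
// code. The caller must be in cooperative GC mode and the type must be fully loaded.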
OBJECTREF MethodTable::Allocate()
{
CONTRACTL
{
MODE_COOPERATIVE;
GC_TRIGGERS;
THROWS;
}
CONTRACTL_END;
CONSISTENCY_CHECK(IsFullyLoaded());
EnsureInstanceActive();
if (HasPreciseInitCctors())
{
CheckRunClassInitAsIfConstructingThrowing();
}
return AllocateObject(this);
}
//==========================================================================================
// Box 'data', creating a new object, and return it. This routine understands the special
// handling needed for Nullable values.
// see code:Nullable#NullableVerification
OBJECTREF MethodTable::Box(void* data)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(IsValueType());
}
CONTRACTL_END;
OBJECTREF ref;
GCPROTECT_BEGININTERIOR (data);
if (IsByRefLike())
{
// We should never box a type that contains stack pointers.
COMPlusThrow(kInvalidOperationException, W("InvalidOperation_TypeCannotBeBoxed"));
}
ref = FastBox(&data);
GCPROTECT_END ();
return ref;
}
OBJECTREF MethodTable::FastBox(void** data)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(IsValueType());
}
CONTRACTL_END;
// See code:Nullable#NullableArchitecture for more
if (IsNullable())
return Nullable::Box(*data, this);
OBJECTREF ref = Allocate();
CopyValueClass(ref->UnBox(), *data, this, ref->GetAppDomain());
return ref;
}
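// Illustrative note (not part of the runtime source): the Nullable special-casing in Box/FastBox
// means that boxing an "empty" Nullable<T> yields a null reference, while boxing a Nullable<T>
// that holds a value yields an ordinary boxed T, never a boxed Nullable<T> (see
// code:Nullable#NullableArchitecture). A hypothetical caller, assuming pNullableMT is the
// MethodTable of some Nullable<T> instantiation and pData points to a protected instance:
//
//     OBJECTREF boxed = pNullableMT->Box(pData);
//     // boxed == NULL              if the Nullable<T> had no value
//     // otherwise boxed is a boxed T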
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
//==========================================================================================
static void FastCallFinalize(Object *obj, PCODE funcPtr, BOOL fCriticalCall)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_SO_INTOLERANT;
BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? EEToManagedCriticalCall : EEToManagedDefault);
#if defined(_TARGET_X86_)
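// On x86 the managed instance-method calling convention passes 'this' in ECX, so load the
// object being finalized into ECX and call its Finalize method directly through funcPtr.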
__asm
{
mov ecx, [obj]
call [funcPtr]
INDEBUG(nop) // Mark the fact that we can call managed code
}
#else // _TARGET_X86_
FastCallFinalizeWorker(obj, funcPtr);
#endif // _TARGET_X86_
END_CALL_TO_MANAGED();
}
#endif // _TARGET_X86_ || _TARGET_AMD64_
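//==========================================================================================
// Finalization helper for System.Threading.Thread objects: detaches the exposed managed Thread
// object from its internal Thread (except during late shutdown) and marks the internal Thread
// as finalized so its cleanup can be scheduled.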
void CallFinalizerOnThreadObject(Object *obj)
{
STATIC_CONTRACT_MODE_COOPERATIVE;
THREADBASEREF refThis = (THREADBASEREF)ObjectToOBJECTREF(obj);
Thread* thread = refThis->GetInternal();
// Prevent multiple calls to Finalize
// Objects can be resurrected after being finalized. However, there is no
// race condition here. We always check whether an exposed thread object is
// still attached to the internal Thread object, before proceeding.
if (thread)
{
refThis->SetDelegate(NULL);
// During process shutdown, we finalize even reachable objects. But if we break
// the link between the System.Thread and the internal Thread object, the runtime
// may not work correctly. In particular, we won't be able to transition between
// contexts and domains to finalize other objects. Since the runtime doesn't
// require that Threads finalize during shutdown, we need to disable this. If
// we wait until phase 2 of shutdown finalization (when the EE is suspended and
// will never resume) then we can simply skip the side effects of Thread
// finalization.
if ((g_fEEShutDown & ShutDown_Finalize2) == 0)
{
if (GetThread() != thread)
{
refThis->ClearInternal();
}
FastInterlockOr ((ULONG *)&thread->m_State, Thread::TS_Finalized);
Thread::SetCleanupNeededForFinalizedThread();
}
}
}
//==========================================================================================