// NOTE: The following four lines were web-page scrape artifacts ("Permalink",
// "Fetching contributors...", line-count banner) and are not part of the source.
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ===========================================================================
// File: CEELOAD.CPP
//
//
// CEELOAD reads in the PE file format using LoadLibrary
// ===========================================================================
#include "common.h"
#include "array.h"
#include "ceeload.h"
#include "hash.h"
#include "vars.hpp"
#include "reflectclasswriter.h"
#include "method.hpp"
#include "stublink.h"
#include "cgensys.h"
#include "excep.h"
#include "dbginterface.h"
#include "dllimport.h"
#include "eeprofinterfaces.h"
#include "perfcounters.h"
#include "encee.h"
#include "jitinterface.h"
#include "eeconfig.h"
#include "dllimportcallback.h"
#include "contractimpl.h"
#include "typehash.h"
#include "instmethhash.h"
#include "virtualcallstub.h"
#include "typestring.h"
#include "stringliteralmap.h"
#include <formattype.h>
#include "fieldmarshaler.h"
#include "sigbuilder.h"
#include "metadataexports.h"
#include "inlinetracking.h"
#ifdef FEATURE_PREJIT
#include "exceptionhandling.h"
#include "corcompile.h"
#include "compile.h"
#include "nibblestream.h"
#include "zapsig.h"
#endif //FEATURE_PREJIT
#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#include "comcallablewrapper.h"
#endif //FEATURE_COMINTEROP
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4724)
#endif // _MSC_VER
#include "ngenhash.inl"
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
#include "perflog.h"
#include "ecall.h"
#include "../md/compiler/custattr.h"
#include "typekey.h"
#include "peimagelayout.inl"
#include "ildbsymlib.h"
#if defined(PROFILING_SUPPORTED)
#include "profilermetadataemitvalidator.h"
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4244)
#endif // _MSC_VER
#ifdef _TARGET_64BIT_
#define COR_VTABLE_PTRSIZED COR_VTABLE_64BIT
#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_32BIT
#else // !_TARGET_64BIT_
#define COR_VTABLE_PTRSIZED COR_VTABLE_32BIT
#define COR_VTABLE_NOT_PTRSIZED COR_VTABLE_64BIT
#endif // !_TARGET_64BIT_
#define CEE_FILE_GEN_GROWTH_COLLECTIBLE 2048
#define NGEN_STATICS_ALLCLASSES_WERE_LOADED -1
// Returns TRUE if this module has inline-tracking information available,
// either from a ReadyToRun image or from an NGen-persisted map.
BOOL Module::HasInlineTrackingMap()
{
    LIMITED_METHOD_DAC_CONTRACT;

#ifdef FEATURE_READYTORUN
    // ReadyToRun images can carry their own inline tracking map.
    if (IsReadyToRun() && GetReadyToRunInfo()->GetInlineTrackingMap() != NULL)
    {
        return TRUE;
    }
#endif
    // Otherwise fall back to the map persisted into an NGen image, if any.
    return (m_pPersistentInlineTrackingMapNGen != NULL);
}
// Fills 'inliners' (up to 'inlinersSize' entries) with the methods known to have
// inlined the method 'inlineeTkn' owned by 'inlineeOwnerMod'. Returns the number
// of inliners found; returns 0 when no tracking map is available.
// '*incompleteData' is set by the underlying map when its data may be partial.
COUNT_T Module::GetInliners(PTR_Module inlineeOwnerMod, mdMethodDef inlineeTkn, COUNT_T inlinersSize, MethodInModule inliners[], BOOL *incompleteData)
{
    WRAPPER_NO_CONTRACT;

#ifdef FEATURE_READYTORUN
    // Prefer the ReadyToRun map when present (mirrors HasInlineTrackingMap).
    if(IsReadyToRun() && GetReadyToRunInfo()->GetInlineTrackingMap() != NULL)
    {
        return GetReadyToRunInfo()->GetInlineTrackingMap()->GetInliners(inlineeOwnerMod, inlineeTkn, inlinersSize, inliners, incompleteData);
    }
#endif
    // NGen-persisted map, if one was saved with the image.
    if(m_pPersistentInlineTrackingMapNGen != NULL)
    {
        return m_pPersistentInlineTrackingMapNGen->GetInliners(inlineeOwnerMod, inlineeTkn, inlinersSize, inliners, incompleteData);
    }
    return 0;
}
#ifndef DACCESS_COMPILE
// ===========================================================================
// Module
// ===========================================================================
//---------------------------------------------------------------------------------------------------
// This wrapper just invokes the real initialization inside a try/hook.
// szName is not null only for dynamic modules
//---------------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------------
// This wrapper just invokes the real initialization inside a try/hook.
// szName is not null only for dynamic modules
//
// When profiling is supported, it brackets Initialize() with the profiler's
// ModuleLoadStarted callback and, on failure, reports the exception's HRESULT
// through ModuleLoadFinished (the success-path notification happens elsewhere).
//---------------------------------------------------------------------------------------------------
void Module::DoInit(AllocMemTracker *pamTracker, LPCWSTR szName)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        STANDARD_VM_CHECK;
    }
    CONTRACTL_END;

#ifdef PROFILING_SUPPORTED
    {
        // Tell an attached profiler that module load is starting.
        BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
        GCX_COOP();
        g_profControlBlock.pProfInterface->ModuleLoadStarted((ModuleID) this);
        END_PIN_PROFILER();
    }
    // Need TRY/HOOK instead of holder so we can get HR of exception thrown for profiler callback
    EX_TRY
#endif
    {
        Initialize(pamTracker, szName);
    }
#ifdef PROFILING_SUPPORTED
    EX_HOOK
    {
        {
            // Initialization failed: report the failure HRESULT to the profiler,
            // then let the exception continue to propagate (EX_HOOK re-throws).
            BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
            g_profControlBlock.pProfInterface->ModuleLoadFinished((ModuleID) this, GET_EXCEPTION()->GetHR());
            END_PIN_PROFILER();
        }
    }
    EX_END_HOOK;
#endif
}
// Set the given bit on m_dwTransientFlags. Return true if we won the race to set the bit.
// Set the given bit on m_dwTransientFlags. Return true if we won the race to set the bit.
BOOL Module::SetTransientFlagInterlocked(DWORD dwFlag)
{
    LIMITED_METHOD_CONTRACT;

    while (true)
    {
        DWORD dwOldFlags = m_dwTransientFlags;

        // Somebody else already set the bit: we lost the race.
        if ((dwOldFlags & dwFlag) != 0)
            return FALSE;

        // Try to publish the new flag atomically; success means we won.
        DWORD dwNewFlags = dwOldFlags | dwFlag;
        if ((DWORD)FastInterlockCompareExchange((LONG*)&m_dwTransientFlags, dwNewFlags, dwOldFlags) == dwOldFlags)
            return TRUE;

        // The flags changed underneath us; re-read and retry.
    }
}
#if PROFILING_SUPPORTED
// Notifies an attached profiler that this module (and, for manifest modules,
// its assembly) has finished loading. If the profiler mutates metadata during
// the callback, any TypeDefs/ExportedTypes it added are registered with the
// assembly afterwards.
void Module::NotifyProfilerLoadFinished(HRESULT hr)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        INJECT_FAULT(COMPlusThrowOM());
        MODE_ANY;
    }
    CONTRACTL_END;

    // Note that in general we wil reuse shared modules. So we need to make sure we only notify
    // the profiler once.
    if (SetTransientFlagInterlocked(IS_PROFILER_NOTIFIED))
    {
        // Record how many types are already present
        DWORD countTypesOrig = 0;
        DWORD countExportedTypesOrig = 0;
        if (!IsResource())
        {
            countTypesOrig = GetMDImport()->GetCountWithTokenKind(mdtTypeDef);
            countExportedTypesOrig = GetMDImport()->GetCountWithTokenKind(mdtExportedType);
        }

        // Notify the profiler, this may cause metadata to be updated
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
            {
                GCX_PREEMP();
                g_profControlBlock.pProfInterface->ModuleLoadFinished((ModuleID) this, hr);

                // Only report assembly attachment when the load itself succeeded.
                if (SUCCEEDED(hr))
                {
                    g_profControlBlock.pProfInterface->ModuleAttachedToAssembly((ModuleID) this,
                                                                                (AssemblyID)m_pAssembly);
                }
            }
            END_PIN_PROFILER();
        }

        // If there are more types than before, add these new types to the
        // assembly
        if (!IsResource())
        {
            DWORD countTypesAfterProfilerUpdate = GetMDImport()->GetCountWithTokenKind(mdtTypeDef);
            DWORD countExportedTypesAfterProfilerUpdate = GetMDImport()->GetCountWithTokenKind(mdtExportedType);

            // typeDefs rids 0 and 1 aren't included in the count, thus X typeDefs before means rid X+1 was valid and our incremental addition should start at X+2
            for (DWORD typeDefRid = countTypesOrig + 2; typeDefRid < countTypesAfterProfilerUpdate + 2; typeDefRid++)
            {
                GetAssembly()->AddType(this, TokenFromRid(typeDefRid, mdtTypeDef));
            }

            // exportedType rid 0 isn't included in the count, thus X exportedTypes before means rid X was valid and our incremental addition should start at X+1
            for (DWORD exportedTypeDef = countExportedTypesOrig + 1; exportedTypeDef < countExportedTypesAfterProfilerUpdate + 1; exportedTypeDef++)
            {
                GetAssembly()->AddExportedType(TokenFromRid(exportedTypeDef, mdtExportedType));
            }
        }

        {
            // Manifest module completion doubles as the assembly-load-finished event.
            BEGIN_PIN_PROFILER(CORProfilerTrackAssemblyLoads());
            if (IsManifest())
            {
                GCX_COOP();
                g_profControlBlock.pProfInterface->AssemblyLoadFinished((AssemblyID) m_pAssembly, hr);
            }
            END_PIN_PROFILER();
        }
    }
}
#ifndef CROSSGEN_COMPILE
// Lazily creates and caches the IMetaDataEmit interface handed to profilers.
// Depending on configuration, the raw emitter may be wrapped in a validator.
// Thread-safe: losers of the initialization race release their instance.
IMetaDataEmit *Module::GetValidatedEmitter()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_NOTRIGGER;
        INJECT_FAULT(COMPlusThrowOM());
        MODE_ANY;
    }
    CONTRACTL_END;

    if (m_pValidatedEmitter.Load() == NULL)
    {
        // In the past profilers could call any API they wanted on the the IMetaDataEmit interface and we didn't
        // verify anything. To ensure we don't break back-compat the verifications are not enabled by default.
        // Right now I have only added verifications for NGEN images, but in the future we might want verifications
        // for all modules.
        IMetaDataEmit* pEmit = NULL;
        if (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_ProfAPI_ValidateNGENInstrumentation) && HasNativeImage())
        {
            // Wrap the emitter so profiler metadata edits can be validated.
            ProfilerMetadataEmitValidator* pValidator = new ProfilerMetadataEmitValidator(GetEmitter());
            pValidator->QueryInterface(IID_IMetaDataEmit, (void**)&pEmit);
        }
        else
        {
            // Hand out the raw emitter; take a reference for the cached copy.
            pEmit = GetEmitter();
            pEmit->AddRef();
        }
        // Atomically swap it into the field (release it if we lose the race)
        if (FastInterlockCompareExchangePointer(&m_pValidatedEmitter, pEmit, NULL) != NULL)
        {
            pEmit->Release();
        }
    }
    return m_pValidatedEmitter.Load();
}
#endif // CROSSGEN_COMPILE
#endif // PROFILING_SUPPORTED
// Emits the ETW module-load event for this module, at most once per module.
// Only successful loads are reported, and only when the runtime ETW provider
// is enabled at informational level.
void Module::NotifyEtwLoadFinished(HRESULT hr)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END

    // we report only successful loads
    if (SUCCEEDED(hr) &&
        ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
                                     TRACE_LEVEL_INFORMATION,
                                     KEYWORDZERO))
    {
        // If the flag was already set, some other consumer of this (shared)
        // module has notified ETW before us.
        BOOL fSharedModule = !SetTransientFlagInterlocked(IS_ETW_NOTIFIED);
        ETW::LoaderLog::ModuleLoad(this, fSharedModule);
    }
}
// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
//
// The constructor phase initializes just enough so that Destruct() can be safely called.
// It cannot throw or fail.
//
// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
//
// The constructor phase initializes just enough so that Destruct() can be safely called.
// It cannot throw or fail.
//
Module::Module(Assembly *pAssembly, mdFile moduleRef, PEFile *file)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        FORBID_FAULT;
    }
    CONTRACTL_END

    PREFIX_ASSUME(pAssembly != NULL);

    m_pAssembly = pAssembly;
    m_moduleRef = moduleRef;
    m_file = file;
    // CLASSES_FREED marks the lookup maps as not-yet-created so Destruct()
    // knows there is nothing to tear down if Initialize never runs.
    m_dwTransientFlags = CLASSES_FREED;

    if (!m_file->HasNativeImage())
    {
        // Memory allocated on LoaderHeap is zero-filled. Spot-check it here.
        _ASSERTE(m_pBinder == NULL);
        _ASSERTE(m_symbolFormat == eSymbolFormatNone);
    }

    // The module keeps its PEFile alive for its own lifetime.
    file->AddRef();
}
// Sets up IBC profile-data collection state for a module backed by a native
// (NGen) or ReadyToRun image: decides whether instrumentation is active and
// locates the image's profile data list.
void Module::InitializeForProfiling()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        MODE_PREEMPTIVE;
        PRECONDITION(HasNativeOrReadyToRunImage());
    }
    CONTRACTL_END;

    COUNT_T cbProfileList = 0;

    m_nativeImageProfiling = FALSE;

    if (HasNativeImage())
    {
        PEImageLayout * pNativeImage = GetNativeImage();
        CORCOMPILE_VERSION_INFO * pNativeVersionInfo = pNativeImage->GetNativeVersionInfoMaybeNull();
        // Only instrumented NGen images (CORCOMPILE_CONFIG_INSTRUMENTATION) can collect profile data.
        if ((pNativeVersionInfo != NULL) && (pNativeVersionInfo->wConfigFlags & CORCOMPILE_CONFIG_INSTRUMENTATION))
        {
            m_nativeImageProfiling = GetAssembly()->IsInstrumented();
        }

        // Link the module to the profile data list if available.
        m_methodProfileList = pNativeImage->GetNativeProfileDataList(&cbProfileList);
    }
    else // ReadyToRun image
    {
#ifdef FEATURE_READYTORUN
        // We already setup the m_methodProfileList in the ReadyToRunInfo constructor
        if (m_methodProfileList != nullptr)
        {
            ReadyToRunInfo * pInfo = GetReadyToRunInfo();
            PEImageLayout * pImage = pInfo->GetImage();

            // Enable profiling if the ZapBBInstr value says to
            m_nativeImageProfiling = GetAssembly()->IsInstrumented();
        }
#endif
    }

#ifdef FEATURE_LAZY_COW_PAGES
    // When running a IBC tuning image to gather profile data
    // we increment the block counts contained in this area.
    //
    if (cbProfileList)
        EnsureWritablePages(m_methodProfileList, cbProfileList);
#endif
}
#ifdef FEATURE_PREJIT
// Completes initialization that is specific to NGen native images: registers
// the image's address range with the execution manager, loads its token and
// helper tables, and (optionally) sets up GC-stress coverage.
void Module::InitializeNativeImage(AllocMemTracker* pamTracker)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        MODE_PREEMPTIVE;
        PRECONDITION(HasNativeImage());
    }
    CONTRACTL_END;

    PEImageLayout * pNativeImage = GetNativeImage();

    // Let the execution manager map code addresses in this range back to this module.
    ExecutionManager::AddNativeImageRange(dac_cast<TADDR>(pNativeImage->GetBase()), pNativeImage->GetVirtualSize(), this);

#ifndef CROSSGEN_COMPILE
    LoadTokenTables();
    LoadHelperTable();
#endif // CROSSGEN_COMPILE

#if defined(HAVE_GCCOVER)
    if (GCStress<cfg_instr_ngen>::IsEnabled())
    {
        // Setting up gc coverage requires the base system classes
        // to be initialized. So we must defer this for mscorlib.
        if(!IsSystem())
        {
            SetupGcCoverageForNativeImage(this);
        }
    }
#endif // defined(HAVE_GCCOVER)
}
// Caches the Assembly resolved for native-metadata AssemblyRef rid 'rid'.
// The cache array is allocated lazily (zero-filled, one slot per AssemblyRef)
// and published with an interlocked exchange; the loser's allocation is
// released by its AllocMemTracker going out of scope.
void Module::SetNativeMetadataAssemblyRefInCache(DWORD rid, PTR_Assembly pAssembly)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (m_NativeMetadataAssemblyRefMap == NULL)
    {
        IMDInternalImport* pImport = GetNativeAssemblyImport();
        DWORD dwMaxRid = pImport->GetCountWithTokenKind(mdtAssemblyRef);
        _ASSERTE(dwMaxRid > 0);

        S_SIZE_T dwAllocSize = S_SIZE_T(sizeof(PTR_Assembly)) * S_SIZE_T(dwMaxRid);

        AllocMemTracker amTracker;
        PTR_Assembly * NativeMetadataAssemblyRefMap = (PTR_Assembly *) amTracker.Track( GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(dwAllocSize) );

        // Note: Memory allocated on loader heap is zero filled

        // Publish the array only if nobody beat us to it; keep our allocation
        // (SuppressRelease) only when we won the race.
        if (InterlockedCompareExchangeT<PTR_Assembly *>(&m_NativeMetadataAssemblyRefMap, NativeMetadataAssemblyRefMap, NULL) == NULL)
            amTracker.SuppressRelease();
    }
    _ASSERTE(m_NativeMetadataAssemblyRefMap != NULL);

    _ASSERTE(rid <= GetNativeAssemblyImport()->GetCountWithTokenKind(mdtAssemblyRef));
    // Rids are 1-based, the array is 0-based.
    m_NativeMetadataAssemblyRefMap[rid-1] = pAssembly;
}
#else // FEATURE_PREJIT
// Without FEATURE_PREJIT nothing is ever persisted into a native image, so no
// address can refer to a persisted object.
BOOL Module::IsPersistedObject(void *address)
{
    LIMITED_METHOD_CONTRACT;

    (void)address; // unused in non-prejit builds

    return FALSE;
}
#endif // FEATURE_PREJIT
// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
//
// The Initialize() phase completes the initialization after the constructor has run.
// It can throw exceptions but whether it throws or succeeds, it must leave the Module
// in a state where Destruct() can be safely called.
//
// szName is only used by dynamic modules, see ReflectionModule::Initialize
//
//
// Module initialization occurs in two phases: the constructor phase and the Initialize phase.
//
// The Initialize() phase completes the initialization after the constructor has run.
// It can throw exceptions but whether it throws or succeeds, it must leave the Module
// in a state where Destruct() can be safely called.
//
// szName is only used by dynamic modules, see ReflectionModule::Initialize
//
//
void Module::Initialize(AllocMemTracker *pamTracker, LPCWSTR szName)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        STANDARD_VM_CHECK;
        PRECONDITION(szName == NULL);
    }
    CONTRACTL_END;

    m_pSimpleName = m_file->GetSimpleName();

    // Set up the module's locks.
    m_Crst.Init(CrstModule);
    m_LookupTableCrst.Init(CrstModuleLookupTable, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD));
    m_FixupCrst.Init(CrstModuleFixup, (CrstFlags)(CRST_HOST_BREAKABLE|CRST_REENTRANCY));
    m_InstMethodHashTableCrst.Init(CrstInstMethodHashTable, CRST_REENTRANCY);
    m_ISymUnmanagedReaderCrst.Init(CrstISymUnmanagedReader, CRST_DEBUGGER_THREAD);

    if (!m_file->HasNativeImage())
    {
        AllocateMaps();

        // A handful of well-known low-level assemblies get a flag set by name.
        if (IsSystem() ||
            (strcmp(m_pSimpleName, "System") == 0) ||
            (strcmp(m_pSimpleName, "System.Core") == 0) ||
            (strcmp(m_pSimpleName, "Windows.Foundation") == 0))
        {
            FastInterlockOr(&m_dwPersistedFlags, LOW_LEVEL_SYSTEM_ASSEMBLY_BY_NAME);
        }
    }

    m_dwTransientFlags &= ~((DWORD)CLASSES_FREED); // Set flag indicating LookupMaps are now in a consistent and destructable state

#ifdef FEATURE_READYTORUN
    if (!HasNativeImage() && !IsResource())
        m_pReadyToRunInfo = ReadyToRunInfo::Initialize(this, pamTracker);
#endif

    // Initialize the instance fields that we need for all non-Resource Modules
    if (!IsResource())
    {
        if (m_pAvailableClasses == NULL && !IsReadyToRun())
        {
            m_pAvailableClasses = EEClassHashTable::Create(this,
                GetAssembly()->IsCollectible() ? AVAILABLE_CLASSES_HASH_BUCKETS_COLLECTIBLE : AVAILABLE_CLASSES_HASH_BUCKETS,
                                                           FALSE /* bCaseInsensitive */, pamTracker);
        }

        if (m_pAvailableParamTypes == NULL)
        {
            m_pAvailableParamTypes = EETypeHashTable::Create(GetLoaderAllocator(), this, PARAMTYPES_HASH_BUCKETS, pamTracker);
        }

        if (m_pInstMethodHashTable == NULL)
        {
            m_pInstMethodHashTable = InstMethodHashTable::Create(GetLoaderAllocator(), this, PARAMMETHODS_HASH_BUCKETS, pamTracker);
        }

        if(m_pMemberRefToDescHashTable == NULL)
        {
            if (IsReflection())
            {
                m_pMemberRefToDescHashTable = MemberRefToDescHashTable::Create(this, MEMBERREF_MAP_INITIAL_SIZE, pamTracker);
            }
            else
            {
                IMDInternalImport * pImport = GetMDImport();

                // Get #MemberRefs and create memberrefToDesc hash table
                m_pMemberRefToDescHashTable = MemberRefToDescHashTable::Create(this, pImport->GetCountWithTokenKind(mdtMemberRef)+1, pamTracker);
            }
        }

#ifdef FEATURE_COMINTEROP
        if (IsCompilationProcess() && m_pGuidToTypeHash == NULL)
        {
            // only allocate this during NGEN-ing
            m_pGuidToTypeHash = GuidToMethodTableHashTable::Create(this, GUID_TO_TYPE_HASH_BUCKETS, pamTracker);
        }
#endif // FEATURE_COMINTEROP
    }

    // Domain-neutral modules get a process-wide index-based module ID;
    // otherwise the ID is assigned later per-domain.
    if (GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain())
    {
        m_ModuleIndex = Module::AllocateModuleIndex();
        m_ModuleID = (DomainLocalModule*)Module::IndexToID(m_ModuleIndex);
    }
    else
    {
        // this will be initialized a bit later.
        m_ModuleID = NULL;
        m_ModuleIndex.m_dwIndex = (SIZE_T)-1;
    }

#ifdef FEATURE_COLLECTIBLE_TYPES
    if (GetAssembly()->IsCollectible())
    {
        FastInterlockOr(&m_dwPersistedFlags, COLLECTIBLE_MODULE);
    }
#endif // FEATURE_COLLECTIBLE_TYPES

    // Prepare statics that are known at module load time
    AllocateStatics(pamTracker);

#ifdef FEATURE_PREJIT
    // Set up native image
    if (HasNativeImage())
    {
        InitializeNativeImage(pamTracker);
    }
#endif // FEATURE_PREJIT

    if (HasNativeOrReadyToRunImage())
    {
        InitializeForProfiling();
    }

#ifdef FEATURE_NATIVE_IMAGE_GENERATION
    if (g_CorCompileVerboseLevel)
        m_pNgenStats = new NgenStats();
#endif

    if (!IsResource() && (m_AssemblyRefByNameTable == NULL))
    {
        Module::CreateAssemblyRefByNameTable(pamTracker);
    }

    // If the program has the "ForceEnc" env variable set we ensure every eligible
    // module has EnC turned on.
    if (g_pConfig->ForceEnc() && IsEditAndContinueCapable())
        EnableEditAndContinue();

    LOG((LF_CLASSLOADER, LL_INFO10, "Loaded pModule: \"%ws\".\n", GetDebugName()));
}
#endif // DACCESS_COMPILE
#ifdef FEATURE_COMINTEROP
#ifndef DACCESS_COMPILE
// static
// static
// Allocates a GuidToMethodTableHashTable on the module's low-frequency loader
// heap (tracked by pamTracker) and constructs it in place.
GuidToMethodTableHashTable* GuidToMethodTableHashTable::Create(Module* pModule, DWORD cInitialBuckets,
                                                               AllocMemTracker *pamTracker)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
    }
    CONTRACTL_END;

    LoaderHeap *pHeap = pModule->GetAssembly()->GetLowFrequencyHeap();
    GuidToMethodTableHashTable *pThis = (GuidToMethodTableHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(GuidToMethodTableHashTable)));

    // The base class get initialized through chaining of constructors. We allocated the hash instance via the
    // loader heap instead of new so use an in-place new to call the constructors now.
    new (pThis) GuidToMethodTableHashTable(pModule, pHeap, cInitialBuckets);

    return pThis;
}
// Inserts a GUID -> MethodTable mapping. When bReplaceIfFound is TRUE and the
// GUID is already present, the existing entry's MethodTable is overwritten;
// otherwise a new entry is allocated and inserted. Returns the entry used.
GuidToMethodTableEntry *GuidToMethodTableHashTable::InsertValue(PTR_GUID pGuid, PTR_MethodTable pMT,
                        BOOL bReplaceIfFound, AllocMemTracker *pamTracker)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
    }
    CONTRACTL_END;

    // Only bother looking for an existing entry when replacement is requested.
    GuidToMethodTableEntry *pExisting = NULL;
    if (bReplaceIfFound)
    {
        pExisting = FindItem(pGuid, NULL);
    }

    if (pExisting != NULL)
    {
        // Found a match: just point it at the new MethodTable.
        pExisting->m_pMT = pMT;
        return pExisting;
    }

    // No match (or replacement not requested): allocate and link a new entry.
    GuidToMethodTableEntry *pNewEntry = BaseAllocateEntry(pamTracker);
    pNewEntry->m_Guid = pGuid;
    pNewEntry->m_pMT = pMT;
    BaseInsertEntry(Hash(pGuid), pNewEntry);
    return pNewEntry;
}
#endif // !DACCESS_COMPILE
// Looks up the MethodTable registered for a GUID; returns NULL on a miss.
// pContext is optional and is only needed by callers that iterate collisions.
PTR_MethodTable GuidToMethodTableHashTable::GetValue(const GUID * pGuid, LookupContext *pContext)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
        PRECONDITION(CheckPointer(pGuid));
    }
    CONTRACTL_END;

    GuidToMethodTableEntry * pHit = FindItem(pGuid, pContext);
    return (pHit != NULL) ? pHit->m_pMT : NULL;
}
// Finds the hash entry whose GUID key equals *pGuid, or NULL if none exists.
GuidToMethodTableEntry *GuidToMethodTableHashTable::FindItem(const GUID * pGuid, LookupContext *pContext)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
        PRECONDITION(CheckPointer(pGuid));
    }
    CONTRACTL_END;

    // It's legal for the caller not to pass us a LookupContext, but we might need to iterate
    // internally (since we lookup via hash and hashes may collide). So substitute our own
    // private context if one was not provided.
    LookupContext sLocalContext;
    LookupContext *pCtx = (pContext != NULL) ? pContext : &sLocalContext;

    // The base class enumerates all entries with the same hash code; check each
    // one against the full key until a real match is found.
    for (PTR_GuidToMethodTableEntry pCur = BaseFindFirstEntryByHash(Hash(pGuid), pCtx);
         pCur != NULL;
         pCur = BaseFindNextEntryByHash(pCtx))
    {
        if (CompareKeys(pCur, pGuid))
        {
            return pCur;
        }
    }

    return NULL;
}
// Full-key comparison for hash lookups: entries store a pointer to their GUID,
// so compare the pointed-to GUID values.
BOOL GuidToMethodTableHashTable::CompareKeys(PTR_GuidToMethodTableEntry pEntry, const GUID * pGuid)
{
    LIMITED_METHOD_DAC_CONTRACT;
    const GUID &guidKey = *pGuid;
    const GUID &guidEntry = *(pEntry->m_Guid);
    return (guidKey == guidEntry) ? TRUE : FALSE;
}
// Hashes a GUID by folding its four DWORD-sized pieces together with XOR.
DWORD GuidToMethodTableHashTable::Hash(const GUID * pGuid)
{
    LIMITED_METHOD_DAC_CONTRACT;

    // A GUID must be exactly four DWORDs for the fold below to be complete.
    static_assert_no_msg(sizeof(GUID) % sizeof(DWORD) == 0);
    static_assert_no_msg(sizeof(GUID) / sizeof(DWORD) == 4);

    const DWORD * pParts = (const DWORD *) pGuid;
    DWORD dwHash = 0;
    for (int i = 0; i < 4; i++)
    {
        dwHash ^= pParts[i];
    }
    return dwHash;
}
// Advances the iterator and writes the next entry to *ppEntry.
// Returns TRUE while entries remain, FALSE once the table is exhausted.
BOOL GuidToMethodTableHashTable::FindNext(Iterator *it, GuidToMethodTableEntry **ppEntry)
{
    LIMITED_METHOD_DAC_CONTRACT;

    // Prime the underlying base-class iterator lazily on first use.
    if (!it->m_fIterating)
    {
        BaseInitIterator(&it->m_sIterator);
        it->m_fIterating = true;
    }

    GuidToMethodTableEntry *pNext = it->m_sIterator.Next();
    *ppEntry = pNext;
    return (pNext != NULL) ? TRUE : FALSE;
}
// Returns the number of entries currently in the table.
DWORD GuidToMethodTableHashTable::GetCount()
{
    LIMITED_METHOD_DAC_CONTRACT;

    // Defer to the base hash table's element counter.
    DWORD cElements = BaseGetElementCount();
    return cElements;
}
#if defined(FEATURE_NATIVE_IMAGE_GENERATION) && !defined(DACCESS_COMPILE)
// NGen save: serialize the table into the data image via the base class.
void GuidToMethodTableHashTable::Save(DataImage *pImage, CorProfileData *pProfileData)
{
    WRAPPER_NO_CONTRACT;
    Base_t::BaseSave(pImage, pProfileData);
}
// NGen fixup: apply base-class relocations for the saved table.
void GuidToMethodTableHashTable::Fixup(DataImage *pImage)
{
    WRAPPER_NO_CONTRACT;
    Base_t::BaseFixup(pImage);
}
// Per-entry save hook. Returning false tells the base class this entry needs
// no special handling beyond the default serialization.
bool GuidToMethodTableHashTable::SaveEntry(DataImage *pImage, CorProfileData *pProfileData,
                    GuidToMethodTableEntry *pOldEntry, GuidToMethodTableEntry *pNewEntry,
                    EntryMappingTable *pMap)
{
    LIMITED_METHOD_CONTRACT;
    return false;
}
// Per-entry fixup hook: register image fixups for both pointer fields of the
// entry (the MethodTable pointer and the GUID pointer).
void GuidToMethodTableHashTable::FixupEntry(DataImage *pImage, GuidToMethodTableEntry *pEntry, void *pFixupBase, DWORD cbFixupOffset)
{
    WRAPPER_NO_CONTRACT;

    pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(GuidToMethodTableEntry, m_pMT), pEntry->m_pMT);
    pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(GuidToMethodTableEntry, m_Guid), pEntry->m_Guid);
}
#endif // FEATURE_NATIVE_IMAGE_GENERATION && !DACCESS_COMPILE
#ifdef FEATURE_PREJIT
#ifndef DACCESS_COMPILE
// Decides (at NGen time) whether pMT may be added to this module's
// GUID-to-MethodTable cache. Certain WinRT types must not be cached.
BOOL Module::CanCacheWinRTTypeByGuid(MethodTable *pMT)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        PRECONDITION(IsCompilationProcess());
    }
    CONTRACTL_END;

    // Don't cache mscorlib-internal declarations of WinRT types.
    if (IsSystem() && pMT->IsProjectedFromWinRT())
        return FALSE;

    // Don't cache redirected WinRT types.
    if (WinRTTypeNameConverter::IsRedirectedWinRTSourceType(pMT))
        return FALSE;

#ifdef FEATURE_NATIVE_IMAGE_GENERATION
    // Don't cache in a module that's not the NGen target, since the result
    // won't be saved, and since the such a module might be read-only.
    if (GetAppDomain()->ToCompilationDomain()->GetTargetModule() != this)
        return FALSE;
#endif

    return TRUE;
}
// Records pMT in the GUID-to-MethodTable cache, replacing any existing entry
// for the same GUID. If pgi is not supplied, the GUID comes from the
// MethodTable's own GuidInfo (guaranteed non-NULL by the precondition).
void Module::CacheWinRTTypeByGuid(PTR_MethodTable pMT, PTR_GuidInfo pgi /*= NULL*/)
{
    CONTRACTL
    {
        STANDARD_VM_CHECK;
        PRECONDITION(CheckPointer(pMT));
        PRECONDITION(pMT->IsLegalNonArrayWinRTType());
        PRECONDITION(pgi != NULL || pMT->GetGuidInfo() != NULL);
        PRECONDITION(IsCompilationProcess());
    }
    CONTRACTL_END;

    if (pgi == NULL)
    {
        pgi = pMT->GetGuidInfo();
    }

    AllocMemTracker amt;
    // TRUE => replace an existing entry for this GUID if present.
    m_pGuidToTypeHash->InsertValue(&pgi->m_Guid, pMT, TRUE, &amt);
    // The entry is kept for the lifetime of the hash; don't roll back the allocation.
    amt.SuppressRelease();
}
#endif // !DACCESS_COMPILE
// Looks up a cached MethodTable for the given GUID; NULL when not cached or
// when this image carries no GUID hash at all.
PTR_MethodTable Module::LookupTypeByGuid(const GUID & guid)
{
    WRAPPER_NO_CONTRACT;

    // Triton ni images do not have this hash; treat a missing table as a miss.
    if (m_pGuidToTypeHash == NULL)
        return NULL;

    return m_pGuidToTypeHash->GetValue(&guid, NULL);
}
// Appends every cached (MethodTable, GUID) pair from this module's
// GUID-to-type hash into the two parallel output arrays.
void Module::GetCachedWinRTTypes(SArray<PTR_MethodTable> * pTypes, SArray<GUID> * pGuids)
{
    CONTRACTL
    {
        STANDARD_VM_CHECK;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    // Triton ni images do not have this hash.
    if (m_pGuidToTypeHash != NULL)
    {
        GuidToMethodTableHashTable::Iterator it(m_pGuidToTypeHash);
        GuidToMethodTableEntry *pEntry;
        while (m_pGuidToTypeHash->FindNext(&it, &pEntry))
        {
            pTypes->Append(pEntry->m_pMT);
            // Entries store a pointer to the GUID; copy the value out.
            pGuids->Append(*pEntry->m_Guid);
        }
    }
}
#endif // FEATURE_PREJIT
#endif // FEATURE_COMINTEROP
#ifndef DACCESS_COMPILE
// Allocates a MemberRefToDescHashTable on the module's low-frequency loader
// heap (tracked by pamTracker) and constructs it in place.
MemberRefToDescHashTable* MemberRefToDescHashTable::Create(Module *pModule, DWORD cInitialBuckets, AllocMemTracker *pamTracker)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
    }
    CONTRACTL_END;

    LoaderHeap *pHeap = pModule->GetAssembly()->GetLowFrequencyHeap();
    MemberRefToDescHashTable *pThis = (MemberRefToDescHashTable*)pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(MemberRefToDescHashTable)));

    // The base class get initialized through chaining of constructors. We allocated the hash instance via the
    // loader heap instead of new so use an in-place new to call the constructors now.
    new (pThis) MemberRefToDescHashTable(pModule, pHeap, cInitialBuckets);

    return pThis;
}
//Inserts FieldRef
//Inserts FieldRef
// Maps a MemberRef token to a FieldDesc. The FieldDesc pointer is tagged with
// IS_FIELD_MEMBER_REF so GetValue can tell fields from methods. Hot tokens may
// already have a (NULL-valued) entry persisted in an NGen image; those entries
// are filled in rather than duplicated.
MemberRefToDescHashEntry* MemberRefToDescHashTable::Insert(mdMemberRef token , FieldDesc *value)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
    }
    CONTRACTL_END;

    LookupContext sAltContext;

    // The flag bit must be free in the pointer so it can carry the field tag.
    _ASSERTE((dac_cast<TADDR>(value) & IS_FIELD_MEMBER_REF) == 0);

    MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(RidFromToken(token), &sAltContext);
    if (pEntry != NULL)
    {
        // If memberRef is hot token in that case entry for memberref is already persisted in ngen image. So entry for it will already be present in hash table.
        // However its value will be null. We need to set its actual value.
        if(pEntry->m_value == dac_cast<TADDR>(NULL))
        {
            // Persisted pages may be read-only; make the slot writable first.
            EnsureWritablePages(&(pEntry->m_value));
            pEntry->m_value = dac_cast<TADDR>(value)|IS_FIELD_MEMBER_REF;
        }

        _ASSERTE(pEntry->m_value == (dac_cast<TADDR>(value)|IS_FIELD_MEMBER_REF));
        return pEntry;
    }

    // For non hot tokens insert new entry in hashtable
    pEntry = BaseAllocateEntry(NULL);
    pEntry->m_value = dac_cast<TADDR>(value)|IS_FIELD_MEMBER_REF;
    BaseInsertEntry(RidFromToken(token), pEntry);

    return pEntry;
}
// Insert MethodRef
// Insert MethodRef
// Maps a MemberRef token to a MethodDesc (stored untagged — the absence of
// IS_FIELD_MEMBER_REF means "method"). Like the FieldDesc overload, hot tokens
// may already have a NULL-valued entry persisted by NGen; fill it in.
MemberRefToDescHashEntry* MemberRefToDescHashTable::Insert(mdMemberRef token , MethodDesc *value)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
        PRECONDITION(!FORBIDGC_LOADER_USE_ENABLED());
    }
    CONTRACTL_END;

    LookupContext sAltContext;

    MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(RidFromToken(token), &sAltContext);
    if (pEntry != NULL)
    {
        // If memberRef is hot token in that case entry for memberref is already persisted in ngen image. So entry for it will already be present in hash table.
        // However its value will be null. We need to set its actual value.
        if(pEntry->m_value == dac_cast<TADDR>(NULL))
        {
            // Persisted pages may be read-only; make the slot writable first.
            EnsureWritablePages(&(pEntry->m_value));
            pEntry->m_value = dac_cast<TADDR>(value);
        }

        _ASSERTE(pEntry->m_value == dac_cast<TADDR>(value));
        return pEntry;
    }

    // For non hot tokens insert new entry in hashtable
    pEntry = BaseAllocateEntry(NULL);
    pEntry->m_value = dac_cast<TADDR>(value);
    BaseInsertEntry(RidFromToken(token), pEntry);

    return pEntry;
}
#if defined(FEATURE_NATIVE_IMAGE_GENERATION)
// NGen save: serialize the MemberRef hash table into the data image. Before
// saving, entries whose tokens appear in the IBC "hot token" list get their
// low bit set on m_value so the base class can treat them as hot data.
void MemberRefToDescHashTable::Save(DataImage *pImage, CorProfileData *pProfileData)
{
    STANDARD_VM_CONTRACT;

    // Mark if the tokens are hot
    if (pProfileData)
    {
        // First call with a NULL buffer just returns the number of hot mdtMemberRef tokens.
        DWORD numInTokenList = pProfileData->GetHotTokens(mdtMemberRef>>24, 1<<RidMap, 1<<RidMap, NULL, 0);

        if (numInTokenList > 0)
        {
            LookupContext sAltContext;

            mdToken *tokenList = (mdToken*)(void*)pImage->GetModule()->GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(mdToken)) * S_SIZE_T(numInTokenList));

            pProfileData->GetHotTokens(mdtMemberRef>>24, 1<<RidMap, 1<<RidMap, tokenList, numInTokenList);
            for (DWORD i = 0; i < numInTokenList; i++)
            {
                // Fix: compute the rid once and pass it to the lookup (the original
                // computed this local and then left it unused, re-deriving the rid).
                DWORD rid = RidFromToken(tokenList[i]);

                MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(rid, &sAltContext);
                if (pEntry != NULL)
                {
                    // The low bit must be free here; it becomes the "hot" tag.
                    _ASSERTE((pEntry->m_value & 0x1) == 0);
                    pEntry->m_value |= 0x1;
                }
            }
        }
    }

    BaseSave(pImage, pProfileData);
}
// Per-entry NGen fixup: the desc pointers cannot be hard-bound across images,
// so the persisted m_value is zeroed and repopulated lazily at runtime (see
// the Insert overloads' hot-token path).
void MemberRefToDescHashTable::FixupEntry(DataImage *pImage, MemberRefToDescHashEntry *pEntry, void *pFixupBase, DWORD cbFixupOffset)
{
    //As there is no more hard binding initialize MemberRef* to NULL
    pImage->ZeroPointerField(pFixupBase, cbFixupOffset + offsetof(MemberRefToDescHashEntry, m_value));
}
#endif // FEATURE_NATIVE_IMAGE_GENERATION
#endif // !DACCESS_COMPILE
// Looks up the desc recorded for a MemberRef token. On a hit, *pfIsMethod is
// set (TRUE for a MethodDesc, FALSE for a FieldDesc) and the pointer is
// returned with its flag bits stripped. Returns NULL on a miss.
PTR_MemberRef MemberRefToDescHashTable::GetValue(mdMemberRef token, BOOL *pfIsMethod)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    LookupContext sAltContext;

    // Entries are hashed by the token's rid.
    MemberRefToDescHashEntry *pEntry = (PTR_MemberRefToDescHashEntry) BaseFindFirstEntryByHash(RidFromToken(token), &sAltContext);
    if (pEntry == NULL)
        return NULL;

    // The IS_FIELD_MEMBER_REF bit distinguishes FieldDescs from MethodDescs.
    *pfIsMethod = (pEntry->m_value & IS_FIELD_MEMBER_REF) ? FALSE : TRUE;

    // Strip all flag bits before handing the pointer back.
    return (PTR_MemberRef)(pEntry->m_value & (~MEMBER_REF_MAP_ALL_FLAGS));
}
// Stores the debugger control bits into m_dwTransientFlags and, when debugging
// is supported, enables or disables Edit-and-Continue accordingly.
void Module::SetDebuggerInfoBits(DebuggerAssemblyControlFlags newBits)
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;

    // The shifted bits must fit inside the reserved debugger-info mask.
    _ASSERTE(((newBits << DEBUGGER_INFO_SHIFT_PRIV) &
              ~DEBUGGER_INFO_MASK_PRIV) == 0);

    // Replace the old debugger bits with the new ones.
    m_dwTransientFlags &= ~DEBUGGER_INFO_MASK_PRIV;
    m_dwTransientFlags |= (newBits << DEBUGGER_INFO_SHIFT_PRIV);

#ifdef DEBUGGING_SUPPORTED
    BOOL setEnC = ((newBits & DACF_ENC_ENABLED) != 0) && IsEditAndContinueCapable();

    // IsEditAndContinueCapable should already check !GetAssembly()->IsDomainNeutral
    _ASSERTE(!setEnC || !GetAssembly()->IsDomainNeutral());

    // The only way can change Enc is through debugger override.
    if (setEnC)
    {
        EnableEditAndContinue();
    }
    else
    {
        // Respect the ForceEnc config: never turn EnC off when it is forced on.
        if (!g_pConfig->ForceEnc())
            DisableEditAndContinue();
    }
#endif // DEBUGGING_SUPPORTED

#if defined(DACCESS_COMPILE)
    // Now that we've changed m_dwTransientFlags, update that in the target too.
    // This will fail for read-only target.
    // If this fails, it will throw an exception.
    // @dbgtodo dac write: finalize on plans for how DAC writes to the target.
    HRESULT hrDac;
    hrDac = DacWriteHostInstance(this, true);
    _ASSERTE(SUCCEEDED(hrDac)); // would throw if there was an error.
#endif // DACCESS_COMPILE
}
#ifndef DACCESS_COMPILE
/* static */
/* static */
// Factory for Module objects. Depending on the file, the Module is either
// constructed in place over the image persisted in a native (NGen) image, or
// allocated on the assembly's high-frequency heap as an EditAndContinueModule
// (when EnC-capable) or a plain Module. DoInit completes initialization.
Module *Module::Create(Assembly *pAssembly, mdFile moduleRef, PEFile *file, AllocMemTracker *pamTracker)
{
    CONTRACT(Module *)
    {
        STANDARD_VM_CHECK;
        PRECONDITION(CheckPointer(pAssembly));
        PRECONDITION(CheckPointer(file));
        PRECONDITION(!IsNilToken(moduleRef) || file->IsAssembly());
        POSTCONDITION(CheckPointer(RETVAL));
        POSTCONDITION(RETVAL->GetFile() == file);
    }
    CONTRACT_END;

    // Hoist CONTRACT into separate routine because of EX incompatibility

    Module *pModule = NULL;

    // Create the module
#ifdef FEATURE_PREJIT
    if (file->HasNativeImage())
    {
        // The native image contains a pre-built Module object; construct over it in place.
        pModule = file->GetLoadedNative()->GetPersistedModuleImage();
        PREFIX_ASSUME(pModule != NULL);
        CONSISTENCY_CHECK_MSG(pModule->m_pAssembly == NULL || !pModule->IsTenured(), // if the module is not tenured it could be our previous attempt
            "Native image can only be used once per process\n");
        EnsureWritablePages(pModule);
        pModule = new ((void*) pModule) Module(pAssembly, moduleRef, file);
        PREFIX_ASSUME(pModule != NULL);
    }
#endif // FEATURE_PREJIT

    if (pModule == NULL)
    {
#ifdef EnC_SUPPORTED
        if (IsEditAndContinueCapable(pAssembly, file))
        {
            // IsEditAndContinueCapable should already check !pAssembly->IsDomainNeutral
            _ASSERTE(!pAssembly->IsDomainNeutral());

            // if file is EnCCapable, always create an EnC-module, but EnC won't necessarily be enabled.
            // Debugger enables this by calling SetJITCompilerFlags on LoadModule callback.

            void* pMemory = pamTracker->Track(pAssembly->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(EditAndContinueModule))));
            pModule = new (pMemory) EditAndContinueModule(pAssembly, moduleRef, file);
        }
        else
#endif // EnC_SUPPORTED
        {
            void* pMemory = pamTracker->Track(pAssembly->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(Module))));
            pModule = new (pMemory) Module(pAssembly, moduleRef, file);
        }
    }

    PREFIX_ASSUME(pModule != NULL);
    // The holder destroys the module if DoInit throws.
    ModuleHolder pModuleSafe(pModule);
    pModuleSafe->DoInit(pamTracker, NULL);

    RETURN pModuleSafe.Extract();
}
// Called after metadata has been updated (e.g. by a profiler or EnC edit):
// grows the rid maps so the current highest TypeRef and AssemblyRef tokens
// can be stored without further allocation.
void Module::ApplyMetaData()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    LOG((LF_CLASSLOADER, LL_INFO100, "Module::ApplyNewMetaData %x\n", this));

    // Fix: removed an unused local (HRESULT hr = S_OK;) left over from an
    // earlier version of this function.
    ULONG ulCount;

    // Ensure for TypeRef
    // +1 because the map must be able to hold the highest valid rid, which is count itself.
    ulCount = GetMDImport()->GetCountWithTokenKind(mdtTypeRef) + 1;
    EnsureTypeRefCanBeStored(TokenFromRid(ulCount, mdtTypeRef));

    // Ensure for AssemblyRef
    ulCount = GetMDImport()->GetCountWithTokenKind(mdtAssemblyRef) + 1;
    EnsureAssemblyRefCanBeStored(TokenFromRid(ulCount, mdtAssemblyRef));
}
//
// Destructor for Module: tears down all per-module runtime state.
// Declared NOTHROW, so every step below must tolerate failure and
// teardown must continue regardless.
//
void Module::Destruct()
{
CONTRACTL
{
INSTANCE_CHECK;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
LOG((LF_EEMEM, INFO3, "Deleting module %x\n", this));
#ifdef PROFILING_SUPPORTED
{
// Tell an attached profiler the unload is starting, unless this module is
// already being unloaded through another path.
BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
if (!IsBeingUnloaded())
{
// Profiler is causing some peripheral class loads. Probably this just needs
// to be turned into a Fault_not_fatal and moved to a specific place inside the profiler.
EX_TRY
{
GCX_PREEMP();
g_profControlBlock.pProfInterface->ModuleUnloadStarted((ModuleID) this);
}
EX_CATCH
{
// Deliberately swallow profiler failures; teardown must proceed.
}
EX_END_CATCH(SwallowAllExceptions);
}
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
// Let DAC-based consumers (debuggers) know this module is going away.
DACNotify::DoModuleUnloadNotification(this);
// Free classes in the class table
FreeClassTables();
#ifdef DEBUGGING_SUPPORTED
if (g_pDebugInterface)
{
GCX_PREEMP();
g_pDebugInterface->DestructModule(this);
}
#endif // DEBUGGING_SUPPORTED
ReleaseISymUnmanagedReader();
// Clean up sig cookies: walk and delete the singly-linked block list.
VASigCookieBlock *pVASigCookieBlock = m_pVASigCookieBlock;
while (pVASigCookieBlock)
{
VASigCookieBlock *pNext = pVASigCookieBlock->m_Next;
delete pVASigCookieBlock;
pVASigCookieBlock = pNext;
}
// Clean up the IL stub cache
if (m_pILStubCache != NULL)
{
delete m_pILStubCache;
}
#ifdef PROFILING_SUPPORTED
{
// Tell an attached profiler the unload has finished.
BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
// Profiler is causing some peripheral class loads. Probably this just needs
// to be turned into a Fault_not_fatal and moved to a specific place inside the profiler.
EX_TRY
{
GCX_PREEMP();
g_profControlBlock.pProfInterface->ModuleUnloadFinished((ModuleID) this, S_OK);
}
EX_CATCH
{
// Deliberately swallow profiler failures; teardown must proceed.
}
EX_END_CATCH(SwallowAllExceptions);
END_PIN_PROFILER();
}
// Release the validated metadata emitter wrapper, if one was created.
if (m_pValidatedEmitter.Load() != NULL)
{
m_pValidatedEmitter->Release();
}
#endif // PROFILING_SUPPORTED
//
// Warning - deleting the zap file will cause the module to be unmapped
//
ClearInMemorySymbolStream();
// Destroy all of the module's locks.
m_Crst.Destroy();
m_FixupCrst.Destroy();
m_LookupTableCrst.Destroy();
m_InstMethodHashTableCrst.Destroy();
m_ISymUnmanagedReaderCrst.Destroy();
// Debugger-specific data: the dynamic-IL lock and the lazily-allocated tables.
if (m_debuggerSpecificData.m_pDynamicILCrst)
{
delete m_debuggerSpecificData.m_pDynamicILCrst;
}
if (m_debuggerSpecificData.m_pDynamicILBlobTable)
{
delete m_debuggerSpecificData.m_pDynamicILBlobTable;
}
if (m_debuggerSpecificData.m_pTemporaryILBlobTable)
{
delete m_debuggerSpecificData.m_pTemporaryILBlobTable;
}
if (m_debuggerSpecificData.m_pILOffsetMappingTable)
{
// Clear each entry's mapping (mirrors SetInstrumentedILOffsetMapping's
// cleanup of replaced entries) before deleting the table itself.
for (ILOffsetMappingTable::Iterator pCurElem = m_debuggerSpecificData.m_pILOffsetMappingTable->Begin(),
pEndElem = m_debuggerSpecificData.m_pILOffsetMappingTable->End();
pCurElem != pEndElem;
pCurElem++)
{
ILOffsetMappingEntry entry = *pCurElem;
entry.m_mapping.Clear();
}
delete m_debuggerSpecificData.m_pILOffsetMappingTable;
}
#ifdef FEATURE_PREJIT
if (HasNativeImage())
{
// NOTE(review): both arms of this #ifdef currently just release m_file;
// the split is kept because releasing a native image also unmaps the
// module (see the warning above). Confirm before collapsing.
m_file->Release();
}
else
#endif // FEATURE_PREJIT
{
m_file->Release();
}
// If this module was loaded as domain-specific, then
// we must free its ModuleIndex so that it can be reused
FreeModuleIndex();
}
#ifdef FEATURE_PREJIT
// Removes this module's native (NGen) image code range from the
// ExecutionManager's range tables. No-op for modules without a native image.
void Module::DeleteNativeCodeRanges()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
FORBID_FAULT;
}
CONTRACTL_END;
if (HasNativeImage())
{
PEImageLayout * pNativeImage = GetNativeImage();
// The range is keyed by the image's base address.
ExecutionManager::DeleteRange(dac_cast<TADDR>(pNativeImage->GetBase()));
}
}
#endif
bool Module::NeedsGlobalMethodTable()
{
CONTRACTL
{
INSTANCE_CHECK;
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
IMDInternalImport * pImport = GetMDImport();
if (!IsResource() && pImport->IsValidToken(COR_GLOBAL_PARENT_TOKEN))
{
{
HENUMInternalHolder funcEnum(pImport);
funcEnum.EnumGlobalFunctionsInit();
if (pImport->EnumGetCount(&funcEnum) != 0)
return true;
}
{
HENUMInternalHolder fieldEnum(pImport);
fieldEnum.EnumGlobalFieldsInit();
if (pImport->EnumGetCount(&fieldEnum) != 0)
return true;
}
}
// resource module or no global statics nor global functions
return false;
}
// Returns the MethodTable of the module's global type (<Module>), or NULL when
// the module declares no global functions/fields. The answer is computed once;
// the COMPUTED_GLOBAL_CLASS flag routes later calls to the TypeDef lookup map.
MethodTable *Module::GetGlobalMethodTable()
{
CONTRACT (MethodTable *)
{
INSTANCE_CHECK;
THROWS;
GC_TRIGGERS;
MODE_ANY;
INJECT_FAULT(CONTRACT_RETURN NULL;);
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
}
CONTRACT_END;
if ((m_dwPersistedFlags & COMPUTED_GLOBAL_CLASS) == 0)
{
MethodTable *pMT = NULL;
if (NeedsGlobalMethodTable())
{
pMT = ClassLoader::LoadTypeDefThrowing(this, COR_GLOBAL_PARENT_TOKEN,
ClassLoader::ThrowIfNotFound,
ClassLoader::FailIfUninstDefOrRef).AsMethodTable();
}
// Mark the answer as computed; the flag is set even when pMT is NULL.
FastInterlockOr(&m_dwPersistedFlags, COMPUTED_GLOBAL_CLASS);
RETURN pMT;
}
else
{
// Already computed: the loaded type (if any) is in the TypeDef lookup map.
RETURN LookupTypeDef(COR_GLOBAL_PARENT_TOKEN).AsMethodTable();
}
}
#endif // !DACCESS_COMPILE
#ifdef FEATURE_PREJIT
/*static*/
// An instantiation is always saved in its preferred zap module only when both
// the class and the method instantiations are typical shared instantiations.
BOOL Module::IsAlwaysSavedInPreferredZapModule(Instantiation classInst,  // the type arguments to the type (if any)
                                               Instantiation methodInst) // the type arguments to the method (if any)
{
    LIMITED_METHOD_CONTRACT;

    if (!ClassLoader::IsTypicalSharedInstantiation(classInst))
        return FALSE;

    return ClassLoader::IsTypicalSharedInstantiation(methodInst);
}
// Computes the preferred zap (NGen) module for an instantiation.
// This gets called recursively for generics, so do a stack probe.
PTR_Module Module::ComputePreferredZapModule(Module * pDefinitionModule,
Instantiation classInst,
Instantiation methodInst)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
PTR_Module ret = NULL;
// The probe guards the recursion: the helper calls
// GetPreferredZapModuleForTypeHandle, which can re-enter this function
// for nested generic type arguments.
INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(DontCallDirectlyForceStackOverflow());
ret = Module::ComputePreferredZapModuleHelper( pDefinitionModule,
classInst,
methodInst );
END_INTERIOR_STACK_PROBE;
return ret;
}
//
// Is pModule likely a dependency of pOtherModule? Heuristic used by the preferred zap module algorithm.
// It can return both false positives and negatives.
//
static bool IsLikelyDependencyOf(Module * pModule, Module * pOtherModule)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
MODE_ANY;
SUPPORTS_DAC;
PRECONDITION(CheckPointer(pOtherModule));
}
CONTRACTL_END
// Every module has a dependency with itself
if (pModule == pOtherModule)
return true;
//
// Explicit check for low level system assemblies is working around Win8P facades introducing extra layer between low level system assemblies
// (System.dll or System.Core.dll) and the app assemblies. Because of this extra layer, the check below won't see the direct
// reference between these low level system assemblies and the app assemblies. The preferred zap module for instantiations of generic
// collections from these low level system assemblies (like LinkedList<AppType>) should be module of AppType. It would be module of the generic
// collection without this check. On desktop (FEATURE_FULL_NGEN defined), it would result into inefficient code because of the instantiations
// would be speculative. On CoreCLR (FEATURE_FULL_NGEN not defined), it would result into the instantiations not getting saved into native
// image at all.
//
// Similar problem exists for Windows.Foundation.winmd. There is a cycle between Windows.Foundation.winmd and Windows.Storage.winmd. This cycle
// would cause preferred zap module for instantiations of foundation types (like IAsyncOperation<StorageFolder>) to be Windows.Foundation.winmd.
// It is a bad choice. It should be Windows.Storage.winmd instead. We explicitly push Windows.Foundation to lower level by treating it as
// low level system assembly to avoid this problem.
//
if (pModule->IsLowLevelSystemAssemblyByName())
{
// A low-level system assembly is assumed to be a dependency of any
// non-low-level module.
if (!pOtherModule->IsLowLevelSystemAssemblyByName())
return true;
// Every module depends upon mscorlib
if (pModule->IsSystem())
return true;
// mscorlib does not depend upon any other module
if (pOtherModule->IsSystem())
return false;
}
else
{
// A non-system module is assumed not to be a dependency of a low-level
// system assembly.
if (pOtherModule->IsLowLevelSystemAssemblyByName())
return false;
}
// At this point neither pModule or pOtherModule is mscorlib
#ifndef DACCESS_COMPILE
//
// We will check to see if the pOtherModule has a reference to pModule
//
// If we can match the assembly ref in the ManifestModuleReferencesMap we can early out.
// This early out kicks in less than half of the time. It hurts performance on average.
// if (!IsNilToken(pOtherModule->FindAssemblyRef(pModule->GetAssembly())))
// return true;
if (pOtherModule->HasReferenceByName(pModule->GetSimpleName()))
return true;
#endif // DACCESS_COMPILE
return false;
}
// Determine the "preferred ngen home" for an instantiated type or method
// * This is the first ngen module that the loader will look in;
// * Also, we only hard bind to a type or method that lives in its preferred module
// The following properties must hold of the preferred module:
// - it must be one of the component type's declaring modules
// - if the type or method is open then the preferred module must be that of one of the type parameters
// (this ensures that we can always hard bind to open types and methods created during ngen)
// - for always-saved instantiations it must be the declaring module of the generic definition
// Otherwise, we try to pick a module that is likely to reference the type or method
//
/* static */
PTR_Module Module::ComputePreferredZapModuleHelper(
Module * pDefinitionModule, // the module that declares the generic type or method
Instantiation classInst, // the type arguments to the type (if any)
Instantiation methodInst) // the type arguments to the method (if any)
{
CONTRACT(PTR_Module)
{
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
MODE_ANY;
PRECONDITION(CheckPointer(pDefinitionModule, NULL_OK));
// One of them will be non-null... Note we don't use CheckPointer
// because that raises a breakpoint in the debugger
PRECONDITION(pDefinitionModule != NULL || !classInst.IsEmpty() || !methodInst.IsEmpty());
POSTCONDITION(CheckPointer(RETVAL));
SUPPORTS_DAC;
}
CONTRACT_END
DWORD totalArgs = classInst.GetNumArgs() + methodInst.GetNumArgs();
// The open type parameters take precedence over closed type parameters since
// we always hardbind to open types.
for (DWORD i = 0; i < totalArgs; i++)
{
// Walk class args first, then method args, using a single flat index.
TypeHandle thArg = (i < classInst.GetNumArgs()) ? classInst[i] : methodInst[i - classInst.GetNumArgs()];
// Encoded types are never open
_ASSERTE(!thArg.IsEncodedFixup());
Module * pOpenModule = thArg.GetDefiningModuleForOpenType();
if (pOpenModule != NULL)
RETURN dac_cast<PTR_Module>(pOpenModule);
}
// The initial value of pCurrentPZM is the pDefinitionModule or mscorlib
Module* pCurrentPZM = (pDefinitionModule != NULL) ? pDefinitionModule : MscorlibBinder::GetModule();
bool preferredZapModuleBasedOnValueType = false;
for (DWORD i = 0; i < totalArgs; i++)
{
TypeHandle pTypeParam = (i < classInst.GetNumArgs()) ? classInst[i] : methodInst[i - classInst.GetNumArgs()];
_ASSERTE(pTypeParam != NULL);
_ASSERTE(!pTypeParam.IsEncodedFixup());
// Recurse (indirectly) to find the preferred module of each type argument.
Module * pParamPZM = GetPreferredZapModuleForTypeHandle(pTypeParam);
//
// If pCurrentPZM is not a dependency of pParamPZM
// then we aren't going to update pCurrentPZM
//
if (IsLikelyDependencyOf(pCurrentPZM, pParamPZM))
{
// If we have a type parameter that is a value type
// and we don't yet have a value type based pCurrentPZM
// then we will select its module as the new pCurrentPZM.
//
if (pTypeParam.IsValueType() && !preferredZapModuleBasedOnValueType)
{
pCurrentPZM = pParamPZM;
preferredZapModuleBasedOnValueType = true;
}
else
{
// The normal rule is to replace the pCurrentPZM only when
// both of the following are true:
// pCurrentPZM is a dependency of pParamPZM
// and pParamPZM is not a dependency of pCurrentPZM
//
// note that the second condition is always true when pCurrentPZM is mscorlib
//
if (!IsLikelyDependencyOf(pParamPZM, pCurrentPZM))
{
pCurrentPZM = pParamPZM;
}
}
}
}
RETURN dac_cast<PTR_Module>(pCurrentPZM);
}
// Computes the preferred zap module for the type described by a TypeKey.
// Returns NULL for function pointer types, which have no preferred module.
PTR_Module Module::ComputePreferredZapModule(TypeKey *pKey)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    CorElementType kind = (CorElementType)pKey->GetKind();

    // Function pointer types: no preferred zap module.
    if (kind == ELEMENT_TYPE_FNPTR)
        return NULL;

    // Instantiated class types: compute from the defining module and the
    // instantiation arguments.
    if (kind == ELEMENT_TYPE_CLASS)
    {
        return Module::ComputePreferredZapModule(pKey->GetModule(),
                                                 pKey->GetInstantiation());
    }

    // All remaining kinds defer to the element type's preferred module.
    return Module::GetPreferredZapModuleForTypeHandle(pKey->GetElementType());
}
/* see code:Module::ComputePreferredZapModuleHelper for more */
/*static*/
// Returns the preferred zap module for a MethodTable: element-type-driven for
// arrays, instantiation-driven for closed generics, otherwise the declaring module.
PTR_Module Module::GetPreferredZapModuleForMethodTable(MethodTable *pMT)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
PTR_Module pRet=NULL;
// Probe: ComputePreferredZapModule recurses through nested generic arguments.
INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(10, NO_FORBIDGC_LOADER_USE_ThrowSO(););
if (pMT->IsArray())
{
// Arrays are treated as a one-argument instantiation over the element type.
TypeHandle elemTH = pMT->GetApproxArrayElementTypeHandle();
pRet= ComputePreferredZapModule(NULL, Instantiation(&elemTH, 1));
}
else if (pMT->HasInstantiation() && !pMT->IsGenericTypeDefinition())
{
pRet= ComputePreferredZapModule(pMT->GetModule(),
pMT->GetInstantiation());
}
else
{
// If it is uninstantiated or it is the generic type definition itself
// then its loader module is simply the module containing its TypeDef
pRet= pMT->GetModule();
}
END_INTERIOR_STACK_PROBE;
return pRet;
}
/*static*/
// Returns the preferred zap module for a TypeDesc (parameterized types,
// generic variables, and function pointer types).
PTR_Module Module::GetPreferredZapModuleForTypeDesc(PTR_TypeDesc pTD)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
SUPPORTS_DAC;
// Parameterized types (byref/pointer/etc.) follow their parameter type.
if (pTD->HasTypeParam())
return GetPreferredZapModuleForTypeHandle(pTD->GetTypeParam());
else if (pTD->IsGenericVariable())
return pTD->GetModule();
// The only remaining TypeDesc kind is a function pointer type.
_ASSERTE(pTD->GetInternalCorElementType() == ELEMENT_TYPE_FNPTR);
PTR_FnPtrTypeDesc pFnPtrTD = dac_cast<PTR_FnPtrTypeDesc>(pTD);
// Result type of function type is used for preferred zap module
return GetPreferredZapModuleForTypeHandle(pFnPtrTD->GetRetAndArgTypesPointer()[0]);
}
/*static*/
// Dispatches on the two possible representations of a TypeHandle and forwards
// to the matching preferred-zap-module computation.
PTR_Module Module::GetPreferredZapModuleForTypeHandle(TypeHandle t)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_ANY;
    }
    CONTRACTL_END;
    SUPPORTS_DAC;

    if (!t.IsTypeDesc())
        return GetPreferredZapModuleForMethodTable(t.AsMethodTable());

    return GetPreferredZapModuleForTypeDesc(t.AsTypeDesc());
}
/*static*/
// Returns the preferred zap module for a MethodDesc.
// NOTE: the branch order matters — a typical definition must be handled before
// the generic-method-definition check.
PTR_Module Module::GetPreferredZapModuleForMethodDesc(const MethodDesc *pMD)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
if (pMD->IsTypicalMethodDefinition())
{
// Uninstantiated (typical) definitions live in their declaring module.
return PTR_Module(pMD->GetModule());
}
else if (pMD->IsGenericMethodDefinition())
{
// Generic method definitions follow their owning type's preferred module.
return GetPreferredZapModuleForMethodTable(pMD->GetMethodTable());
}
else
{
// Instantiated methods: compute from the class and method instantiations.
return ComputePreferredZapModule(pMD->GetModule(),
pMD->GetClassInstantiation(),
pMD->GetMethodInstantiation());
}
}
/* see code:Module::ComputePreferredZapModuleHelper for more */
/*static*/
// Returns the preferred zap module for a FieldDesc by delegating to its
// enclosing MethodTable.
PTR_Module Module::GetPreferredZapModuleForFieldDesc(FieldDesc * pFD)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_ANY;
    }
    CONTRACTL_END;

    // The approx MT is sufficient: it's always the one that owns the FieldDesc
    // data structure.
    MethodTable * pOwningMT = pFD->GetApproxEnclosingMethodTable();
    return GetPreferredZapModuleForMethodTable(pOwningMT);
}
#endif // FEATURE_PREJIT
/*static*/
// Returns TRUE only when none of the conditions that rule out Edit and
// Continue apply to this assembly/file pair.
BOOL Module::IsEditAndContinueCapable(Assembly *pAssembly, PEFile *file)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    _ASSERTE(pAssembly != NULL && file != NULL);

    // Some modules are never EnC-capable; each check below disqualifies.
    if (pAssembly->GetDebuggerInfoBits() & DACF_ALLOW_JIT_OPTS)
        return FALSE;
    if (pAssembly->IsDomainNeutral())
        return FALSE;
    if (file->IsSystem())
        return FALSE;
    if (file->IsResource())
        return FALSE;
    if (file->HasNativeImage())
        return FALSE;
    if (file->IsDynamic())
        return FALSE;

    return TRUE;
}
// Returns TRUE when this module is its assembly's manifest module. The
// identity comparison goes through TADDR so it also works under DAC.
BOOL Module::IsManifest()
{
    WRAPPER_NO_CONTRACT;
    TADDR manifestModuleAddr = dac_cast<TADDR>(GetAssembly()->GetManifestModule());
    return manifestModuleAddr == dac_cast<TADDR>(this);
}
// Returns the DomainAssembly for this module in the given AppDomain.
// For the manifest module this is its own DomainFile; otherwise delegate to
// the owning assembly.
DomainAssembly* Module::GetDomainAssembly(AppDomain *pDomain)
{
CONTRACT(DomainAssembly *)
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pDomain, NULL_OK));
POSTCONDITION(CheckPointer(RETVAL));
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACT_END;
if (IsManifest())
RETURN (DomainAssembly *) GetDomainFile(pDomain);
else
RETURN (DomainAssembly *) m_pAssembly->GetDomainAssembly(pDomain);
}
// Returns the DomainFile for this module in the given AppDomain, loading the
// domain-neutral module dependency if it is not yet present in that domain.
DomainFile *Module::GetDomainFile(AppDomain *pDomain)
{
CONTRACT(DomainFile *)
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pDomain));
POSTCONDITION(CheckPointer(RETVAL));
GC_TRIGGERS;
THROWS;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACT_END;
if (Module::IsEncodedModuleIndex(GetModuleID()))
{
// Domain-neutral module: look up the per-domain DomainFile by index.
DomainLocalBlock *pLocalBlock = pDomain->GetDomainLocalBlock();
DomainFile *pDomainFile = pLocalBlock->TryGetDomainFile(GetModuleIndex());
#if !defined(DACCESS_COMPILE) && defined(FEATURE_LOADER_OPTIMIZATION)
// Not yet present in this domain: load it now (non-DAC builds only).
if (pDomainFile == NULL)
pDomainFile = pDomain->LoadDomainNeutralModuleDependency(this, FILE_LOADED);
#endif // !DACCESS_COMPILE
RETURN (PTR_DomainFile) pDomainFile;
}
else
{
// Domain-specific module: the DomainFile hangs directly off the module ID.
CONSISTENCY_CHECK(dac_cast<TADDR>(pDomain) == dac_cast<TADDR>(GetDomain()) || IsSingleAppDomain());
RETURN dac_cast<PTR_DomainFile>(m_ModuleID->GetDomainFile());
}
}
// Non-loading lookup of this module's DomainAssembly in the given domain.
// Unlike GetDomainAssembly, this may return NULL (NOTHROW, no loading).
DomainAssembly* Module::FindDomainAssembly(AppDomain *pDomain)
{
CONTRACT(DomainAssembly *)
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pDomain));
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
if (IsManifest())
RETURN dac_cast<PTR_DomainAssembly>(FindDomainFile(pDomain));
else
RETURN m_pAssembly->FindDomainAssembly(pDomain);
}
// Returns the DomainModule for a non-manifest module in the given domain.
// The underlying per-domain state is a DomainFile; for non-manifest modules
// it is downcast to DomainModule.
DomainModule *Module::GetDomainModule(AppDomain *pDomain)
{
    CONTRACT(DomainModule *)
    {
        INSTANCE_CHECK;
        PRECONDITION(CheckPointer(pDomain));
        PRECONDITION(!IsManifest());
        POSTCONDITION(CheckPointer(RETVAL));
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACT_END;

    DomainFile *pDomainFile = GetDomainFile(pDomain);
    RETURN static_cast<DomainModule *>(pDomainFile);
}
// Non-loading lookup of this module's DomainFile in the given domain.
// Returns NULL when the module has no DomainFile in that domain yet.
DomainFile *Module::FindDomainFile(AppDomain *pDomain)
{
CONTRACT(DomainFile *)
{
INSTANCE_CHECK;
PRECONDITION(CheckPointer(pDomain));
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
if (Module::IsEncodedModuleIndex(GetModuleID()))
{
// Domain-neutral module: look up by module index; may be NULL.
DomainLocalBlock *pLocalBlock = pDomain->GetDomainLocalBlock();
RETURN pLocalBlock->TryGetDomainFile(GetModuleIndex());
}
else
{
// Domain-specific module: only resolvable for its own domain.
if (dac_cast<TADDR>(pDomain) == dac_cast<TADDR>(GetDomain()) || IsSingleAppDomain())
RETURN m_ModuleID->GetDomainFile();
else
RETURN NULL;
}
}
// Non-loading lookup of the DomainModule for a non-manifest module.
// May return NULL; the underlying DomainFile is downcast to DomainModule.
DomainModule *Module::FindDomainModule(AppDomain *pDomain)
{
    CONTRACT(DomainModule *)
    {
        INSTANCE_CHECK;
        PRECONDITION(CheckPointer(pDomain));
        PRECONDITION(!IsManifest());
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        GC_NOTRIGGER;
        NOTHROW;
        MODE_ANY;
    }
    CONTRACT_END;

    DomainFile *pDomainFile = FindDomainFile(pDomain);
    RETURN static_cast<DomainModule *>(pDomainFile);
}
#ifndef DACCESS_COMPILE
#include "staticallocationhelpers.inl"
// Parses metadata and initializes offsets of per-class static blocks.
// For every non-generic TypeDef in the module this computes, per statics kind
// (index [0] = regular statics, [1] = thread statics):
// - the starting GC-handle offset and non-GC byte offset of the class's block
// - the module-wide totals (handle counts and block sizes) stored at the end.
// The per-class offset tables are lazily allocated only when some class
// actually has statics of that kind.
void Module::BuildStaticsOffsets(AllocMemTracker *pamTracker)
{
STANDARD_VM_CONTRACT;
// Trade off here. We want a slot for each type. That way we can get to 2 bits per class and
// index directly and not need a mapping from ClassID to MethodTable (we will use the RID
// as the mapping)
IMDInternalImport *pImport = GetMDImport();
DWORD * pRegularStaticOffsets = NULL;
DWORD * pThreadStaticOffsets = NULL;
// Get the number of types/classes defined in this module. Add 1 to count the module itself
DWORD dwNumTypes = pImport->GetCountWithTokenKind(mdtTypeDef) + 1; // +1 for module type
// [0] covers regular statics, [1] covers thread statics
DWORD dwGCHandles[2] = { 0, 0 };
// Organization in memory of the static block
//
//
// | GC Statics |
// |
// |
// | Class Data (one byte per class) | pointer to gc statics | primitive type statics |
//
//
#ifndef CROSSBITNESS_COMPILE
// The assertions must hold in every non-crossbitness scenario
_ASSERTE(OFFSETOF__DomainLocalModule__m_pDataBlob_ == DomainLocalModule::OffsetOfDataBlob());
_ASSERTE(OFFSETOF__ThreadLocalModule__m_pDataBlob == ThreadLocalModule::OffsetOfDataBlob());
#endif
// Running non-GC byte counts start after the per-class flag bytes.
DWORD dwNonGCBytes[2] = {
DomainLocalModule::OffsetOfDataBlob() + sizeof(BYTE)*dwNumTypes,
ThreadLocalModule::OffsetOfDataBlob() + sizeof(BYTE)*dwNumTypes
};
HENUMInternalHolder hTypeEnum(pImport);
hTypeEnum.EnumAllInit(mdtTypeDef);
mdTypeDef type;
// Parse each type of the class
while (pImport->EnumNext(&hTypeEnum, &type))
{
// Set offset for this type
DWORD dwIndex = RidFromToken(type) - 1;
// [0] covers regular statics, [1] covers thread statics
DWORD dwAlignment[2] = { 1, 1 };
DWORD dwClassNonGCBytes[2] = { 0, 0 };
DWORD dwClassGCHandles[2] = { 0, 0 };
// need to check if the type is generic and if so exclude it from iteration as we don't know the size
HENUMInternalHolder hGenericEnum(pImport);
hGenericEnum.EnumInit(mdtGenericParam, type);
ULONG cGenericParams = pImport->EnumGetCount(&hGenericEnum);
if (cGenericParams == 0)
{
HENUMInternalHolder hFieldEnum(pImport);
hFieldEnum.EnumInit(mdtFieldDef, type);
mdFieldDef field;
// Parse each field of the type
while (pImport->EnumNext(&hFieldEnum, &field))
{
BOOL fSkip = FALSE;
CorElementType ElementType = ELEMENT_TYPE_END;
mdToken tkValueTypeToken = 0;
int kk; // Use one set of variables for regular statics, and the other set for thread statics
fSkip = GetStaticFieldElementTypeForFieldDef(this, pImport, field, &ElementType, &tkValueTypeToken, &kk);
if (fSkip)
continue;
// We account for "regular statics" and "thread statics" separately.
// Currently we are lumping RVA and context statics into "regular statics",
// but we probably shouldn't.
// Accumulate size/alignment for non-GC primitives, or a handle count for
// reference-like fields, based on the field's element type.
switch (ElementType)
{
case ELEMENT_TYPE_I1:
case ELEMENT_TYPE_U1:
case ELEMENT_TYPE_BOOLEAN:
dwClassNonGCBytes[kk] += 1;
break;
case ELEMENT_TYPE_I2:
case ELEMENT_TYPE_U2:
case ELEMENT_TYPE_CHAR:
dwAlignment[kk] = max(2, dwAlignment[kk]);
dwClassNonGCBytes[kk] += 2;
break;
case ELEMENT_TYPE_I4:
case ELEMENT_TYPE_U4:
case ELEMENT_TYPE_R4:
dwAlignment[kk] = max(4, dwAlignment[kk]);
dwClassNonGCBytes[kk] += 4;
break;
case ELEMENT_TYPE_FNPTR:
case ELEMENT_TYPE_PTR:
case ELEMENT_TYPE_I:
case ELEMENT_TYPE_U:
dwAlignment[kk] = max((1 << LOG2_PTRSIZE), dwAlignment[kk]);
dwClassNonGCBytes[kk] += (1 << LOG2_PTRSIZE);
break;
case ELEMENT_TYPE_I8:
case ELEMENT_TYPE_U8:
case ELEMENT_TYPE_R8:
dwAlignment[kk] = max(8, dwAlignment[kk]);
dwClassNonGCBytes[kk] += 8;
break;
case ELEMENT_TYPE_VAR:
case ELEMENT_TYPE_MVAR:
case ELEMENT_TYPE_STRING:
case ELEMENT_TYPE_SZARRAY:
case ELEMENT_TYPE_ARRAY:
case ELEMENT_TYPE_CLASS:
case ELEMENT_TYPE_OBJECT:
dwClassGCHandles[kk] += 1;
break;
case ELEMENT_TYPE_VALUETYPE:
// Statics for valuetypes where the valuetype is defined in this module are handled here. Other valuetype statics utilize the pessimistic model below.
dwClassGCHandles[kk] += 1;
break;
case ELEMENT_TYPE_END:
default:
// The actual element type was ELEMENT_TYPE_VALUETYPE, but the as we don't want to load additional assemblies
// to determine these static offsets, we've fallen back to a pessimistic model.
if (tkValueTypeToken != 0)
{
// We'll have to be pessimistic here: reserve both the largest
// primitive slot and a GC handle.
dwClassNonGCBytes[kk] += MAX_PRIMITIVE_FIELD_SIZE;
dwAlignment[kk] = max(MAX_PRIMITIVE_FIELD_SIZE, dwAlignment[kk]);
dwClassGCHandles[kk] += 1;
break;
}
else
{
// field has an unexpected type
ThrowHR(VER_E_FIELD_SIG);
break;
}
}
}
if (pRegularStaticOffsets == NULL && (dwClassGCHandles[0] != 0 || dwClassNonGCBytes[0] != 0))
{
// Lazily allocate table for offsets. We need offsets for GC and non GC areas. We add +1 to use as a sentinel.
pRegularStaticOffsets = (PTR_DWORD)pamTracker->Track(
GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(
(S_SIZE_T(2 * sizeof(DWORD))*(S_SIZE_T(dwNumTypes)+S_SIZE_T(1)))));
// Backfill offsets for every earlier (statics-free) type with the
// current running totals.
for (DWORD i = 0; i < dwIndex; i++) {
pRegularStaticOffsets[i * 2 ] = dwGCHandles[0]*TARGET_POINTER_SIZE;
pRegularStaticOffsets[i * 2 + 1] = dwNonGCBytes[0];
}
}
if (pThreadStaticOffsets == NULL && (dwClassGCHandles[1] != 0 || dwClassNonGCBytes[1] != 0))
{
// Lazily allocate table for offsets. We need offsets for GC and non GC areas. We add +1 to use as a sentinel.
pThreadStaticOffsets = (PTR_DWORD)pamTracker->Track(
GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(
(S_SIZE_T(2 * sizeof(DWORD))*(S_SIZE_T(dwNumTypes)+S_SIZE_T(1)))));
// Backfill offsets for every earlier (statics-free) type with the
// current running totals.
for (DWORD i = 0; i < dwIndex; i++) {
pThreadStaticOffsets[i * 2 ] = dwGCHandles[1]*TARGET_POINTER_SIZE;
pThreadStaticOffsets[i * 2 + 1] = dwNonGCBytes[1];
}
}
}
if (pRegularStaticOffsets != NULL)
{
// Align the offset of non gc statics
dwNonGCBytes[0] = (DWORD) ALIGN_UP(dwNonGCBytes[0], dwAlignment[0]);
// Save current offsets
pRegularStaticOffsets[dwIndex*2] = dwGCHandles[0]*TARGET_POINTER_SIZE;
pRegularStaticOffsets[dwIndex*2 + 1] = dwNonGCBytes[0];
// Increment for next class
dwGCHandles[0] += dwClassGCHandles[0];
dwNonGCBytes[0] += dwClassNonGCBytes[0];
}
if (pThreadStaticOffsets != NULL)
{
// Align the offset of non gc statics
dwNonGCBytes[1] = (DWORD) ALIGN_UP(dwNonGCBytes[1], dwAlignment[1]);
// Save current offsets
pThreadStaticOffsets[dwIndex*2] = dwGCHandles[1]*TARGET_POINTER_SIZE;
pThreadStaticOffsets[dwIndex*2 + 1] = dwNonGCBytes[1];
// Increment for next class
dwGCHandles[1] += dwClassGCHandles[1];
dwNonGCBytes[1] += dwClassNonGCBytes[1];
}
}
m_maxTypeRidStaticsAllocated = dwNumTypes;
// Write the sentinel entries (totals) at index dwNumTypes.
if (pRegularStaticOffsets != NULL)
{
pRegularStaticOffsets[dwNumTypes*2] = dwGCHandles[0]*TARGET_POINTER_SIZE;
pRegularStaticOffsets[dwNumTypes*2 + 1] = dwNonGCBytes[0];
}
if (pThreadStaticOffsets != NULL)
{
pThreadStaticOffsets[dwNumTypes*2] = dwGCHandles[1]*TARGET_POINTER_SIZE;
pThreadStaticOffsets[dwNumTypes*2 + 1] = dwNonGCBytes[1];
}
// Publish the computed tables and module-wide totals.
m_pRegularStaticOffsets = pRegularStaticOffsets;
m_pThreadStaticOffsets = pThreadStaticOffsets;
m_dwMaxGCRegularStaticHandles = dwGCHandles[0];
m_dwMaxGCThreadStaticHandles = dwGCHandles[1];
m_dwRegularStaticsBlockSize = dwNonGCBytes[0];
m_dwThreadStaticsBlockSize = dwNonGCBytes[1];
}
// Returns the offsets (GC handle offset, non-GC byte offset) of the regular
// static block for class 'cl', as precomputed by BuildStaticsOffsets.
// Dynamic (per-instantiation) classes get only the dynamic-entry blob offset.
// Throws BFA_METADATA_CORRUPT when the request exceeds the precomputed layout.
void Module::GetOffsetsForRegularStaticData(
mdToken cl,
BOOL bDynamic, DWORD dwGCStaticHandles,
DWORD dwNonGCStaticBytes,
DWORD * pOutStaticHandleOffset,
DWORD * pOutNonGCStaticOffset)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END
*pOutStaticHandleOffset = 0;
*pOutNonGCStaticOffset = 0;
// Nothing to hand out when the class has no statics at all.
if (!dwGCStaticHandles && !dwNonGCStaticBytes)
{
return;
}
#ifndef CROSSBITNESS_COMPILE
_ASSERTE(OFFSETOF__DomainLocalModule__NormalDynamicEntry__m_pDataBlob == DomainLocalModule::DynamicEntry::GetOffsetOfDataBlob());
#endif
// Statics for instantiated types are allocated dynamically per-instantiation
if (bDynamic)
{
// Non GC statics are embedded in the Dynamic Entry.
*pOutNonGCStaticOffset = OFFSETOF__DomainLocalModule__NormalDynamicEntry__m_pDataBlob;
return;
}
if (m_pRegularStaticOffsets == NULL)
{
THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
}
_ASSERTE(m_pRegularStaticOffsets != (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED);
// We allocate in the big blob.
DWORD index = RidFromToken(cl) - 1;
*pOutStaticHandleOffset = m_pRegularStaticOffsets[index*2];
*pOutNonGCStaticOffset = m_pRegularStaticOffsets[index*2 + 1];
#ifdef CROSSBITNESS_COMPILE
*pOutNonGCStaticOffset += OFFSETOF__DomainLocalModule__m_pDataBlob_ - DomainLocalModule::OffsetOfDataBlob();
#endif
// Check we didn't go out of what we predicted we would need for the class:
// the next entry in the table bounds this class's block.
if (*pOutStaticHandleOffset + TARGET_POINTER_SIZE*dwGCStaticHandles >
m_pRegularStaticOffsets[(index+1)*2] ||
*pOutNonGCStaticOffset + dwNonGCStaticBytes >
m_pRegularStaticOffsets[(index+1)*2 + 1])
{ // It's most likely that this is due to bad metadata, thus the exception. However, the
// previous comments for this bit of code mentioned that this could be a corner case bug
// with static field size estimation, though this is entirely unlikely since the code has
// been this way for at least two releases.
THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
}
}
// Thread-static counterpart of GetOffsetsForRegularStaticData: returns the
// (GC handle offset, non-GC byte offset) of class 'cl' within the module's
// thread-static block, as precomputed by BuildStaticsOffsets.
void Module::GetOffsetsForThreadStaticData(
mdToken cl,
BOOL bDynamic, DWORD dwGCStaticHandles,
DWORD dwNonGCStaticBytes,
DWORD * pOutStaticHandleOffset,
DWORD * pOutNonGCStaticOffset)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END
*pOutStaticHandleOffset = 0;
*pOutNonGCStaticOffset = 0;
// Nothing to hand out when the class has no thread statics at all.
if (!dwGCStaticHandles && !dwNonGCStaticBytes)
{
return;
}
#ifndef CROSSBITNESS_COMPILE
_ASSERTE(OFFSETOF__ThreadLocalModule__DynamicEntry__m_pDataBlob == ThreadLocalModule::DynamicEntry::GetOffsetOfDataBlob());
#endif
// Statics for instantiated types are allocated dynamically per-instantiation
if (bDynamic)
{
// Non GC thread statics are embedded in the Dynamic Entry.
*pOutNonGCStaticOffset = OFFSETOF__ThreadLocalModule__DynamicEntry__m_pDataBlob;
return;
}
if (m_pThreadStaticOffsets == NULL)
{
THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
}
_ASSERTE(m_pThreadStaticOffsets != (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED);
// We allocate in the big blob.
DWORD index = RidFromToken(cl) - 1;
*pOutStaticHandleOffset = m_pThreadStaticOffsets[index*2];
*pOutNonGCStaticOffset = m_pThreadStaticOffsets[index*2 + 1];
#ifdef CROSSBITNESS_COMPILE
*pOutNonGCStaticOffset += OFFSETOF__ThreadLocalModule__m_pDataBlob - ThreadLocalModule::GetOffsetOfDataBlob();
#endif
// Check we didn't go out of what we predicted we would need for the class:
// the next entry in the table bounds this class's block.
if (*pOutStaticHandleOffset + TARGET_POINTER_SIZE*dwGCStaticHandles >
m_pThreadStaticOffsets[(index+1)*2] ||
*pOutNonGCStaticOffset + dwNonGCStaticBytes >
m_pThreadStaticOffsets[(index+1)*2 + 1])
{
// It's most likely that this is due to bad metadata, thus the exception. However, the
// previous comments for this bit of code mentioned that this could be a corner case bug
// with static field size estimation, though this is entirely unlikely since the code has
// been this way for at least two releases.
THROW_BAD_FORMAT(BFA_METADATA_CORRUPT, this);
}
}
// initialize Crst controlling the Dynamic IL hashtable
void Module::InitializeDynamicILCrst()
{
Crst * pCrst = new Crst(CrstDynamicIL, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD));
if (InterlockedCompareExchangeT(
&m_debuggerSpecificData.m_pDynamicILCrst, pCrst, NULL) != NULL)
{
delete pCrst;
}
}
// Add a (token, address) pair to the table of IL blobs for reflection/dynamics
// Arguments:
// Input:
// token          method token
// blobAddress    address of the start of the IL blob, including the header
// fTemporaryOverride
//                FALSE = permanent override (DynamicILBlobTable),
//                TRUE  = temporary override (TemporaryILBlobTable)
// Output: not explicit, but the pair is stored in the selected table,
// replacing any existing entry for the same token (no duplicates).
void Module::SetDynamicIL(mdToken token, TADDR blobAddress, BOOL fTemporaryOverride)
{
    DynamicILBlobEntry entry = {mdToken(token), TADDR(blobAddress)};

    // Lazily allocate a Crst to serialize update access to the info structure.
    // Carefully synchronize to ensure we don't leak a Crst in race conditions.
    if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
    {
        InitializeDynamicILCrst();
    }

    CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);

    // Pick the destination table for this kind of override.
    PTR_DynamicILBlobTable *ppTable = fTemporaryOverride
        ? &m_debuggerSpecificData.m_pTemporaryILBlobTable
        : &m_debuggerSpecificData.m_pDynamicILBlobTable;

    // The hash tables themselves are lazily allocated, under the lock.
    if (*ppTable == NULL)
    {
        *ppTable = PTR_DynamicILBlobTable(new DynamicILBlobTable);
    }

    (*ppTable)->AddOrReplace(entry);
}
#endif // !DACCESS_COMPILE
// Get the stored address of the IL blob for reflection/dynamics
// Arguments:
// Input:
// token method token
// fAllowTemporary also check the temporary overrides
// Return Value: starting (target) address of the IL blob corresponding to the
// input token, or TADDR(NULL) when no override is stored.
TADDR Module::GetDynamicIL(mdToken token, BOOL fAllowTemporary)
{
SUPPORTS_DAC;
#ifndef DACCESS_COMPILE
// The Crst to serialize update access to the info structure is lazily allocated.
// If it hasn't been allocated yet, then we don't have any IL blobs (temporary or otherwise)
if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
{
return TADDR(NULL);
}
// NOTE: under DAC no lock is taken; the DAC reads a debuggee snapshot.
CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);
#endif
// Both hash tables are lazily allocated, so if they're NULL
// then we have no IL blobs
if (fAllowTemporary && m_debuggerSpecificData.m_pTemporaryILBlobTable != NULL)
{
// Temporary overrides take precedence over permanent ones.
DynamicILBlobEntry entry = m_debuggerSpecificData.m_pTemporaryILBlobTable->Lookup(token);
// Only return a value if the lookup succeeded
if (!DynamicILBlobTraits::IsNull(entry))
{
return entry.m_il;
}
}
if (m_debuggerSpecificData.m_pDynamicILBlobTable == NULL)
{
return TADDR(NULL);
}
DynamicILBlobEntry entry = m_debuggerSpecificData.m_pDynamicILBlobTable->Lookup(token);
// If the lookup fails, it returns the 'NULL' entry
// The 'NULL' entry has m_il set to NULL, so either way we're safe
return entry.m_il;
}
#if !defined(DACCESS_COMPILE)
//---------------------------------------------------------------------------------------
//
// Add instrumented IL offset mapping for the specified method.
//
// Arguments:
// token - the MethodDef token of the method in question
// mapping - the mapping information between original IL offsets and instrumented IL offsets
//
// Notes:
// * Once added, the mapping stays valid until the Module containing the method is destructed.
// * The profiler may potentially update the mapping more than once.
//
// Store (or replace) the instrumented IL offset mapping for the given MethodDef.
// All updates are serialized under m_pDynamicILCrst; tables are lazily created.
void Module::SetInstrumentedILOffsetMapping(mdMethodDef token, InstrumentedILOffsetMapping mapping)
{
ILOffsetMappingEntry entry(token, mapping);
// Lazily allocate a Crst to serialize update access to the hash table.
// Carefully synchronize to ensure we don't leak a Crst in race conditions.
if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
{
InitializeDynamicILCrst();
}
CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);
// Lazily allocate the hash table.
if (m_debuggerSpecificData.m_pILOffsetMappingTable == NULL)
{
m_debuggerSpecificData.m_pILOffsetMappingTable = PTR_ILOffsetMappingTable(new ILOffsetMappingTable);
}
// If the profiler already supplied a mapping for this method, release the
// old mapping's storage before the AddOrReplace below overwrites the entry.
ILOffsetMappingEntry currentEntry = m_debuggerSpecificData.m_pILOffsetMappingTable->Lookup(ILOffsetMappingTraits::GetKey(entry));
if (!ILOffsetMappingTraits::IsNull(currentEntry))
currentEntry.m_mapping.Clear();
m_debuggerSpecificData.m_pILOffsetMappingTable->AddOrReplace(entry);
}
#endif // DACCESS_COMPILE
//---------------------------------------------------------------------------------------
//
// Retrieve the instrumented IL offset mapping for the specified method.
//
// Arguments:
// token - the MethodDef token of the method in question
//
// Return Value:
// Return the mapping information between original IL offsets and instrumented IL offsets.
// Check InstrumentedILOffsetMapping::IsNull() to see if any mapping is available.
//
// Notes:
// * Once added, the mapping stays valid until the Module containing the method is destructed.
// * The profiler may potentially update the mapping more than once.
//
InstrumentedILOffsetMapping Module::GetInstrumentedILOffsetMapping(mdMethodDef token)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
}
CONTRACTL_END;
// Lazily allocate a Crst to serialize update access to the hash table.
// If the Crst is NULL, then we couldn't possibly have added any mapping yet, so just return NULL.
if (m_debuggerSpecificData.m_pDynamicILCrst == NULL)
{
// Default-constructed mapping reports IsNull() to the caller.
InstrumentedILOffsetMapping emptyMapping;
return emptyMapping;
}
CrstHolder ch(m_debuggerSpecificData.m_pDynamicILCrst);
// If the hash table hasn't been created, then we couldn't possibly have added any mapping yet,
// so just return NULL.
if (m_debuggerSpecificData.m_pILOffsetMappingTable == NULL)
{
InstrumentedILOffsetMapping emptyMapping;
return emptyMapping;
}
// A failed lookup yields the traits' null entry, whose m_mapping is the
// empty mapping — so the caller's IsNull() check works in every case.
ILOffsetMappingEntry entry = m_debuggerSpecificData.m_pILOffsetMappingTable->Lookup(token);
return entry.m_mapping;
}
#undef DECODE_TYPEID
#undef ENCODE_TYPEID
#undef IS_ENCODED_TYPEID
#ifndef DACCESS_COMPILE
// Returns TRUE if the assembly's CompilationRelaxationsAttribute opts out of
// string interning. The answer is computed once and cached in m_dwPersistedFlags.
BOOL Module::IsNoStringInterning()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END
if (!(m_dwPersistedFlags & COMPUTED_STRING_INTERNING))
{
// The flags should be precomputed in native images
_ASSERTE(!HasNativeImage());
// Default is string interning
BOOL fNoStringInterning = FALSE;
HRESULT hr;
// This flag applies to the assembly, but it is stored on the module so it can be
// cached in the ngen image. Thus, we should only ever need it for the manifest module.
IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
_ASSERTE(mdImport);
mdToken token;
IfFailThrow(mdImport->GetAssemblyFromScope(&token));
const BYTE *pVal;
ULONG cbVal;
hr = mdImport->GetCustomAttributeByName(token,
COMPILATIONRELAXATIONS_TYPE,
(const void**)&pVal, &cbVal);
// Parse the attribute
if (hr == S_OK)
{
CustomAttributeParser cap(pVal, cbVal);
IfFailThrow(cap.SkipProlog());
// Get Flags
UINT32 flags;
IfFailThrow(cap.GetU4(&flags));
if (flags & CompilationRelaxations_NoStringInterning)
{
fNoStringInterning = TRUE;
}
}
#ifdef _DEBUG
// Debug-only override for testing: 0 forces interning on, 2 forces it off.
static ConfigDWORD g_NoStringInterning;
DWORD dwOverride = g_NoStringInterning.val(CLRConfig::INTERNAL_NoStringInterning);
if (dwOverride == 0)
{
// Disabled
fNoStringInterning = FALSE;
}
else if (dwOverride == 2)
{
// Always true (testing)
fNoStringInterning = TRUE;
}
#endif // _DEBUG
// Publish both the "computed" bit and the result atomically so concurrent
// callers either recompute (idempotent) or see a fully-published answer.
FastInterlockOr(&m_dwPersistedFlags, COMPUTED_STRING_INTERNING |
(fNoStringInterning ? NO_STRING_INTERNING : 0));
}
return !!(m_dwPersistedFlags & NO_STRING_INTERNING);
}
// Reads the assembly's NeutralResourcesLanguageAttribute (culture name and
// fallback location). Returns TRUE if the attribute exists. When the values were
// cached at ngen time, the cached copies are returned instead of re-parsing.
BOOL Module::GetNeutralResourcesLanguage(LPCUTF8 * cultureName, ULONG * cultureNameLength, INT16 * fallbackLocation, BOOL cacheAttribute)
{
STANDARD_VM_CONTRACT;
BOOL retVal = FALSE;
if (!(m_dwPersistedFlags & NEUTRAL_RESOURCES_LANGUAGE_IS_CACHED))
{
const BYTE *pVal = NULL;
ULONG cbVal = 0;
// This flag applies to the assembly, but it is stored on the module so it can be
// cached in the ngen image. Thus, we should only ever need it for the manifest module.
IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
_ASSERTE(mdImport);
mdToken token;
IfFailThrow(mdImport->GetAssemblyFromScope(&token));
// Check for the existence of the attribute.
HRESULT hr = mdImport->GetCustomAttributeByName(token,"System.Resources.NeutralResourcesLanguageAttribute",(const void **)&pVal, &cbVal);
if (hr == S_OK) {
// we should not have a native image (it would have been cached at ngen time)
_ASSERTE(!HasNativeImage());
CustomAttributeParser cap(pVal, cbVal);
IfFailThrow(cap.SkipProlog());
IfFailThrow(cap.GetString(cultureName, cultureNameLength));
IfFailThrow(cap.GetI2(fallbackLocation));
// Should only be true on Module.Save(). Update flag to show we have the attribute cached
if (cacheAttribute)
FastInterlockOr(&m_dwPersistedFlags, NEUTRAL_RESOURCES_LANGUAGE_IS_CACHED);
retVal = TRUE;
}
}
else
{
// Cached (ngen) path: hand back the values persisted in the image.
*cultureName = m_pszCultureName;
*cultureNameLength = m_CultureNameLength;
*fallbackLocation = m_FallbackLocation;
retVal = TRUE;
#ifdef _DEBUG
// confirm that the NGENed attribute is correct
LPCUTF8 pszCultureNameCheck = NULL;
ULONG cultureNameLengthCheck = 0;
INT16 fallbackLocationCheck = 0;
const BYTE *pVal = NULL;
ULONG cbVal = 0;
IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
_ASSERTE(mdImport);
mdToken token;
IfFailThrow(mdImport->GetAssemblyFromScope(&token));
// Confirm that the attribute exists, and has the same value as when we ngen'd it
HRESULT hr = mdImport->GetCustomAttributeByName(token,"System.Resources.NeutralResourcesLanguageAttribute",(const void **)&pVal, &cbVal);
_ASSERTE(hr == S_OK);
CustomAttributeParser cap(pVal, cbVal);
IfFailThrow(cap.SkipProlog());
IfFailThrow(cap.GetString(&pszCultureNameCheck, &cultureNameLengthCheck));
IfFailThrow(cap.GetI2(&fallbackLocationCheck));
_ASSERTE(cultureNameLengthCheck == m_CultureNameLength);
_ASSERTE(fallbackLocationCheck == m_FallbackLocation);
_ASSERTE(strncmp(pszCultureNameCheck,m_pszCultureName,m_CultureNameLength) == 0);
#endif // _DEBUG
}
return retVal;
}
// Returns TRUE if the assembly carries DefaultDllImportSearchPathsAttribute.
// Both the presence bit and the attribute's value (stored into
// m_DefaultDllImportSearchPathsAttributeValue) are cached on first call.
BOOL Module::HasDefaultDllImportSearchPathsAttribute()
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
if(IsDefaultDllImportSearchPathsAttributeCached())
{
return (m_dwPersistedFlags & DEFAULT_DLL_IMPORT_SEARCH_PATHS_STATUS) != 0 ;
}
IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
BOOL attributeIsFound = FALSE;
// TokenFromRid(1, mdtAssembly) is the assembly's own manifest token.
attributeIsFound = GetDefaultDllImportSearchPathsAttributeValue(mdImport, TokenFromRid(1, mdtAssembly),&m_DefaultDllImportSearchPathsAttributeValue);
if(attributeIsFound)
{
// Set "cached" and "present" together so readers never see a half-published state.
FastInterlockOr(&m_dwPersistedFlags, DEFAULT_DLL_IMPORT_SEARCH_PATHS_IS_CACHED | DEFAULT_DLL_IMPORT_SEARCH_PATHS_STATUS);
}
else
{
FastInterlockOr(&m_dwPersistedFlags, DEFAULT_DLL_IMPORT_SEARCH_PATHS_IS_CACHED);
}
return (m_dwPersistedFlags & DEFAULT_DLL_IMPORT_SEARCH_PATHS_STATUS) != 0 ;
}
// Returns a BOOL to indicate if we have computed whether compiler has instructed us to
// wrap the non-CLS compliant exceptions or not.
// Returns a non-zero value iff the wrap-exceptions answer has already been
// computed and cached in m_dwPersistedFlags (see IsRuntimeWrapExceptions).
BOOL Module::IsRuntimeWrapExceptionsStatusComputed()
{
LIMITED_METHOD_CONTRACT;
return (m_dwPersistedFlags & COMPUTED_WRAP_EXCEPTIONS);
}
// Returns TRUE if the assembly's RuntimeCompatibilityAttribute requests wrapping
// of non-CLS-compliant (non-Exception-derived) thrown objects. Computed once;
// result cached in m_dwPersistedFlags.
BOOL Module::IsRuntimeWrapExceptions()
{
CONTRACTL
{
THROWS;
if (IsRuntimeWrapExceptionsStatusComputed()) GC_NOTRIGGER; else GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END
if (!(IsRuntimeWrapExceptionsStatusComputed()))
{
// The flags should be precomputed in native images
_ASSERTE(!HasNativeImage());
HRESULT hr;
BOOL fRuntimeWrapExceptions = FALSE;
// This flag applies to the assembly, but it is stored on the module so it can be
// cached in the ngen image. Thus, we should only ever need it for the manifest module.
IMDInternalImport *mdImport = GetAssembly()->GetManifestImport();
mdToken token;
// Any metadata failure below jumps to ErrExit, which caches the default
// (no wrapping) rather than propagating the error.
IfFailGo(mdImport->GetAssemblyFromScope(&token));
const BYTE *pVal;
ULONG cbVal;
hr = mdImport->GetCustomAttributeByName(token,
RUNTIMECOMPATIBILITY_TYPE,
(const void**)&pVal, &cbVal);
// Parse the attribute
if (hr == S_OK)
{
CustomAttributeParser ca(pVal, cbVal);
CaNamedArg namedArgs[1] = {{0}};
// First, the void constructor:
IfFailGo(ParseKnownCaArgs(ca, NULL, 0));
// Then, find the named argument
namedArgs[0].InitBoolField("WrapNonExceptionThrows");
IfFailGo(ParseKnownCaNamedArgs(ca, namedArgs, lengthof(namedArgs)));
if (namedArgs[0].val.boolean)
fRuntimeWrapExceptions = TRUE;
}
ErrExit:
FastInterlockOr(&m_dwPersistedFlags, COMPUTED_WRAP_EXCEPTIONS |
(fRuntimeWrapExceptions ? WRAP_EXCEPTIONS : 0));
}
return !!(m_dwPersistedFlags & WRAP_EXCEPTIONS);
}
// Returns TRUE if the assembly's metadata version string indicates it was built
// against a pre-v4 runtime ("v1..." or "v2..."). Computed once; cached in flags.
BOOL Module::IsPreV4Assembly()
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
SO_TOLERANT;
}
CONTRACTL_END
if (!(m_dwPersistedFlags & COMPUTED_IS_PRE_V4_ASSEMBLY))
{
// The flags should be precomputed in native images
_ASSERTE(!HasNativeImage());
IMDInternalImport *pImport = GetAssembly()->GetManifestImport();
_ASSERTE(pImport);
BOOL fIsPreV4Assembly = FALSE;
LPCSTR szVersion = NULL;
if (SUCCEEDED(pImport->GetVersionString(&szVersion)))
{
// Need at least "vN" plus one more char; length check also guards the
// szVersion[1] access below.
if (szVersion != NULL && strlen(szVersion) > 2)
{
fIsPreV4Assembly = (szVersion[0] == 'v' || szVersion[0] == 'V') &&
(szVersion[1] == '1' || szVersion[1] == '2');
}
}
FastInterlockOr(&m_dwPersistedFlags, COMPUTED_IS_PRE_V4_ASSEMBLY |
(fIsPreV4Assembly ? IS_PRE_V4_ASSEMBLY : 0));
}
return !!(m_dwPersistedFlags & IS_PRE_V4_ASSEMBLY);
}
// Return a pointer into the GC-statics MethodTable array for the given index.
// Indices below numHotGCStaticsMTs resolve into the hot table; the remainder
// resolve into the cold table after rebasing by the hot count.
ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ModuleCtorInfo::GetGCStaticMTs(DWORD index)
{
    LIMITED_METHOD_CONTRACT;

    if (index >= numHotGCStaticsMTs)
    {
        _ASSERTE(ppColdGCStaticsMTs != NULL);
        // All cold offsets were shifted by the hot count, so rebase the index
        // before indexing into the cold table.
        return ppColdGCStaticsMTs + (index - numHotGCStaticsMTs);
    }

    _ASSERTE(ppHotGCStaticsMTs != NULL);
    return ppHotGCStaticsMTs + index;
}
// Assign a new dynamic-statics slot ID for the given MethodTable, growing the
// DynamicStaticsInfo table as needed. Returns the zero-based slot ID.
DWORD Module::AllocateDynamicEntry(MethodTable *pMT)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
PRECONDITION(pMT->GetModuleForStatics() == this);
PRECONDITION(pMT->IsDynamicStatics());
PRECONDITION(!pMT->ContainsGenericVariables());
}
CONTRACTL_END;
// Lock-free ID reservation: the interlocked add makes each caller's newId unique.
DWORD newId = FastInterlockExchangeAdd((LONG*)&m_cDynamicEntries, 1);
if (newId >= m_maxDynamicEntries)
{
// Grow under the lock; re-check because another thread may have grown already.
CrstHolder ch(&m_Crst);
if (newId >= m_maxDynamicEntries)
{
// Double the capacity (starting from at least 16) until newId fits.
SIZE_T maxDynamicEntries = max(16, m_maxDynamicEntries);
while (maxDynamicEntries <= newId)
{
maxDynamicEntries *= 2;
}
DynamicStaticsInfo* pNewDynamicStaticsInfo = (DynamicStaticsInfo*)
(void*)GetLoaderAllocator()->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(DynamicStaticsInfo)) * S_SIZE_T(maxDynamicEntries));
if (m_pDynamicStaticsInfo)
memcpy(pNewDynamicStaticsInfo, m_pDynamicStaticsInfo, sizeof(DynamicStaticsInfo) * m_maxDynamicEntries);
// NOTE(review): the old block is not freed — loader-heap memory lives until
// the allocator is destroyed, so this is presumably intentional. Confirm.
m_pDynamicStaticsInfo = pNewDynamicStaticsInfo;
m_maxDynamicEntries = maxDynamicEntries;
}
}
EnsureWritablePages(&(m_pDynamicStaticsInfo[newId]))->pEnclosingMT = pMT;
LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Assigned dynamic ID %d to %s\n", newId, pMT->GetDebugClassName()));
return newId;
}
// Release this Module's ModuleIndex back to the dispenser, if it has one.
// Domain-neutral modules never recycle their indices.
void Module::FreeModuleIndex()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // We do not recycle ModuleIndexes used by domain neutral Modules.
    if (GetAssembly()->IsDomainNeutral())
        return;

    // An empty, short-lived Module object that was never assigned a
    // ModuleIndex has nothing to free.
    if (m_ModuleID == NULL)
        return;

    // For a domain-specific module, m_ModuleID should hold a pointer to the
    // DomainLocalModule — not an encoded index.
    _ASSERTE(!Module::IsEncodedModuleIndex((SIZE_T)m_ModuleID));
    _ASSERTE(m_ModuleIndex == m_ModuleID->GetModuleIndex());

    // Get the ModuleIndex from the DLM and free it
    Module::FreeModuleIndex(m_ModuleIndex);
}
// Hand out a fresh ModuleIndex from the global dispenser.
ModuleIndex Module::AllocateModuleIndex()
{
    DWORD newId;
    g_pModuleIndexDispenser->NewId(NULL, newId);

    // The IdDispenser issues IDs starting at 1, while domain-neutral module
    // indices have historically started at 0 (mscorlib has always been 0).
    // Subtract 1 so the dispenser's range maps onto the historical one.
    return ModuleIndex((SIZE_T)(newId - 1));
}
// Return a previously allocated ModuleIndex to the global dispenser.
void Module::FreeModuleIndex(ModuleIndex index)
{
    WRAPPER_NO_CONTRACT;
    // AllocateModuleIndex subtracted 1 from the dispenser's value, so undo
    // that bias before disposing of the ID.
    DWORD dispenserId = index.m_dwIndex + 1;
    g_pModuleIndexDispenser->DisposeId(dispenserId);
}
// Allocate the GC handles backing this module's precomputed (non-dynamic) GC
// statics in the given AppDomain. No-op when there are no GC statics to root.
void Module::AllocateRegularStaticHandles(AppDomain* pDomain)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
}
CONTRACTL_END;
#ifndef CROSSGEN_COMPILE
if (NingenEnabled())
return;
// Allocate the handles we will need. Note that AllocateStaticFieldObjRefPtrs will only
// allocate if pModuleData->GetGCStaticsBasePointerAddress(pMT) != 0, avoiding creating
// handles more than once for a given MT or module
DomainLocalModule *pModuleData = GetDomainLocalModule(pDomain);
_ASSERTE(pModuleData->GetPrecomputedGCStaticsBasePointerAddress() != NULL);
if (this->m_dwMaxGCRegularStaticHandles > 0)
{
// If we're setting up a non-default domain, we want the allocation to look like it's
// coming from the created domain.
// REVISIT_TODO: The comparison "pDomain != GetDomain()" will always be true for domain-neutral
// modules, since GetDomain() will return the SharedDomain, which is NOT an AppDomain.
// Was this intended? If so, there should be a clarifying comment. If not, then we should
// probably do "pDomain != GetAppDomain()" instead.
if (pDomain != GetDomain() &&
pDomain != SystemDomain::System()->DefaultDomain() &&
IsSystem())
{
pDomain->AllocateStaticFieldObjRefPtrsCrossDomain(this->m_dwMaxGCRegularStaticHandles,
pModuleData->GetPrecomputedGCStaticsBasePointerAddress());
}
else
{
pDomain->AllocateStaticFieldObjRefPtrs(this->m_dwMaxGCRegularStaticHandles,
pModuleData->GetPrecomputedGCStaticsBasePointerAddress());
}
// We should throw if we fail to allocate and never hit this assert
_ASSERTE(pModuleData->GetPrecomputedGCStaticsBasePointer() != NULL);
}
#endif // CROSSGEN_COMPILE
}
// Returns TRUE if static storage for the given TypeDef was laid out during one
// of the up-front statics allocation passes (so the type can use precomputed
// statics rather than the slower dynamic statics path).
BOOL Module::IsStaticStoragePrepared(mdTypeDef tkType)
{
LIMITED_METHOD_CONTRACT;
// Right now the design is that we do one static allocation pass during NGEN,
// and a 2nd pass for it at module init time for modules that weren't NGENed or the NGEN
// pass was unsucessful. If we are loading types after that then we must use dynamic
// static storage. These dynamic statics require an additional indirection so they
// don't perform quite as well.
//
// This check was created for the scenario where a profiler adds additional types
// however it seems likely this check would also accurately handle other dynamic
// scenarios such as ref.emit and EnC as long as they are adding new types and
// not new statics to existing types.
_ASSERTE(TypeFromToken(tkType) == mdtTypeDef);
return m_maxTypeRidStaticsAllocated >= RidFromToken(tkType);
}
// Compute the static-field layout for this module (per-class offsets for GC and
// non-GC statics). Resource-only modules get minimal header-sized blocks; fully
// loaded ngen images need no work at all.
void Module::AllocateStatics(AllocMemTracker *pamTracker)
{
STANDARD_VM_CONTRACT;
if (IsResource())
{
// Resource modules contain no types, so the statics blocks are just the headers.
m_dwRegularStaticsBlockSize = DomainLocalModule::OffsetOfDataBlob();
m_dwThreadStaticsBlockSize = ThreadLocalModule::OffsetOfDataBlob();
// If it has no code, we don't have to allocate anything
LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Resource module %s. No statics neeeded\n", GetSimpleName()));
_ASSERTE(m_maxTypeRidStaticsAllocated == 0);
return;
}
#ifdef FEATURE_PREJIT
if (m_pRegularStaticOffsets == (PTR_DWORD) NGEN_STATICS_ALLCLASSES_WERE_LOADED)
{
_ASSERTE(HasNativeImage());
// This is an ngen image and all the classes were loaded at ngen time, so we're done.
LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: 'Complete' Native image found, no statics parsing needed for module %s.\n", GetSimpleName()));
// typeDefs rids 0 and 1 aren't included in the count, thus X typeDefs means rid X+1 is valid
_ASSERTE(m_maxTypeRidStaticsAllocated == GetMDImport()->GetCountWithTokenKind(mdtTypeDef) + 1);
return;
}
#endif
LOG((LF_CLASSLOADER, LL_INFO10000, "STATICS: Allocating statics for module %s\n", GetSimpleName()));
// Build the offset table, which will tell us what the offsets for the statics of each class are (one offset for gc handles, one offset
// for non gc types)
BuildStaticsOffsets(pamTracker);
}
// This method will report GC static refs of the module. It doesn't have to be complete (ie, it's
// currently used to opportunistically get more concurrency in the marking of statics), so it currently
// ignores any statics that are not preallocated (ie: won't report statics from IsDynamicStatics() MT)
// The reason this function is in Module and not in DomainFile (together with DomainLocalModule is because
// for shared modules we need a very fast way of getting to the DomainLocalModule. For that we use
// a table in DomainLocalBlock that's indexed with a module ID
//
// This method is a secondary way for the GC to find statics, and it is only used when we are on
// a multiproc machine and we are using the ServerHeap. The primary way used by the GC to find
// statics is through the handle table. Module::AllocateRegularStaticHandles() allocates a GC handle
// from the handle table, and the GC will trace this handle and find the statics.
// Report this module's precomputed GC-static object refs to the GC's promote
// callback. Only called on the GC special thread during a server-GC collection;
// dynamic statics are intentionally not reported here (the handle table covers them).
void Module::EnumRegularStaticGCRefs(AppDomain* pAppDomain, promote_func* fn, ScanContext* sc)
{
CONTRACT_VOID
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACT_END;
_ASSERTE(GCHeapUtilities::IsGCInProgress() &&
GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
DomainLocalModule *pModuleData = GetDomainLocalModule(pAppDomain);
DWORD dwHandles = m_dwMaxGCRegularStaticHandles;
if (IsResource())
{
// Resource modules have no statics to scan.
RETURN;
}
LOG((LF_GC, LL_INFO100, "Scanning statics for module %s\n", GetSimpleName()));
OBJECTREF* ppObjectRefs = pModuleData->GetPrecomputedGCStaticsBasePointer();
for (DWORD i = 0 ; i < dwHandles ; i++)
{
// Handles are allocated in SetDomainFile (except for bootstrapped mscorlib). In any
// case, we shouldnt get called if the module hasn't had it's handles allocated (as we
// only get here if IsActive() is true, which only happens after SetDomainFile(), which
// is were we allocate handles.
_ASSERTE(ppObjectRefs);
fn((Object **)(ppObjectRefs+i), sc, 0);
}
LOG((LF_GC, LL_INFO100, "Done scanning statics for module %s\n", GetSimpleName()));
RETURN;
}
//---------------------------------------------------------------------------------------
//
// Associate this Module with its DomainFile, allocating and wiring up the
// DomainLocalModule (the per-domain storage for precomputed statics) as needed.
//
// Arguments:
//    pDomainFile - the DomainFile (assembly for manifest modules) to attach
//
// Notes:
//    * Domain-neutral (shared, multi-appdomain) modules get a fresh
//      DomainLocalModule per domain, registered in the domain's DomainLocalBlock
//      slot table keyed by ModuleIndex.
//    * Domain-specific modules point m_ModuleID directly at their single
//      DomainLocalModule.
//
void Module::SetDomainFile(DomainFile *pDomainFile)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        PRECONDITION(CheckPointer(pDomainFile));
        PRECONDITION(IsManifest() == pDomainFile->IsAssembly());
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    DomainLocalModule* pModuleData = 0;

    // Do we need to allocate memory for the non GC statics?
    if ((GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain()) || m_ModuleID == NULL)
    {
        // Allocate memory for the module statics.
        LoaderAllocator *pLoaderAllocator = NULL;
        if (GetAssembly()->IsCollectible())
        {
            // Collectible assemblies own their allocator; statics must die with it.
            pLoaderAllocator = GetAssembly()->GetLoaderAllocator();
        }
        else
        {
            pLoaderAllocator = pDomainFile->GetAppDomain()->GetLoaderAllocator();
        }

        SIZE_T size = GetDomainLocalModuleSize();

        LOG((LF_CLASSLOADER, LL_INFO10, "STATICS: Allocating %i bytes for precomputed statics in module %S in LoaderAllocator %p\n",
            size, this->GetDebugName(), pLoaderAllocator));

        // We guarantee alignment for 64-bit regular statics on 32-bit platforms even without FEATURE_64BIT_ALIGNMENT for performance reasons.
        _ASSERTE(size >= DomainLocalModule::OffsetOfDataBlob());

        pModuleData = (DomainLocalModule*)(void*)
            pLoaderAllocator->GetHighFrequencyHeap()->AllocAlignedMem(
                size, MAX_PRIMITIVE_FIELD_SIZE);

        // Note: Memory allocated on loader heap is zero filled
        // memset(pModuleData, 0, size);

        // Verify that the space is really zero initialized
        _ASSERTE(pModuleData->GetPrecomputedGCStaticsBasePointer() == NULL);

        // Make sure that the newly allocated DomainLocalModule gets
        // a copy of the domain-neutral module ID.
        if (GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain())
        {
            // If the module was loaded as domain-neutral, we can find the ID by
            // casting 'm_ModuleID'.
            _ASSERTE(Module::IDToIndex((SIZE_T)m_ModuleID) == this->m_ModuleIndex);
            pModuleData->m_ModuleIndex = Module::IDToIndex((SIZE_T)m_ModuleID);

            // Eventually I want to just do this instead...
            //pModuleData->m_ModuleIndex = this->m_ModuleIndex;
        }
        else
        {
            // If the module was loaded as domain-specific, then we need to assign
            // this module a domain-neutral module ID.
            pModuleData->m_ModuleIndex = Module::AllocateModuleIndex();
            m_ModuleIndex = pModuleData->m_ModuleIndex;
        }
    }
    else
    {
        pModuleData = this->m_ModuleID;
        // Fixed: the format string previously specified %s and %08x but passed no
        // arguments, which is undefined behavior when this log level is enabled.
        LOG((LF_CLASSLOADER, LL_INFO10, "STATICS: Allocation not needed for ngened non shared module %s\n", GetSimpleName()));
    }

    if (GetAssembly()->IsDomainNeutral() && !IsSingleAppDomain())
    {
        // Shared module: publish the DomainLocalModule and DomainFile in the
        // domain's per-module slot table, keyed by this module's index.
        DomainLocalBlock *pLocalBlock;
        {
            pLocalBlock = pDomainFile->GetAppDomain()->GetDomainLocalBlock();
            pLocalBlock->SetModuleSlot(GetModuleIndex(), pModuleData);
        }
        pLocalBlock->SetDomainFile(GetModuleIndex(), pDomainFile);
    }
    else
    {
        // Non shared case, module points directly to the statics. In ngen case
        // m_pDomainModule is already set for the non shared case
        if (m_ModuleID == NULL)
        {
            m_ModuleID = pModuleData;
        }
        m_ModuleID->SetDomainFile(pDomainFile);
    }

    // Allocate static handles now.
    // NOTE: Bootstrapping issue with mscorlib - we will manually allocate later
    if (g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT] != NULL)
        AllocateRegularStaticHandles(pDomainFile->GetAppDomain());
}
#ifndef CROSSGEN_COMPILE
// Return the managed System.Reflection.Module object exposed for this module.
// Delegates to the DomainFile, which lazily creates and caches the object.
OBJECTREF Module::GetExposedObject()
{
CONTRACT(OBJECTREF)
{
INSTANCE_CHECK;
POSTCONDITION(RETVAL != NULL);
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACT_END;
RETURN GetDomainFile()->GetExposedModuleObject();
}
#endif // CROSSGEN_COMPILE
//
// AllocateMap allocates the RID maps based on the size of the current
// metadata (if any)
//
void Module::AllocateMaps()
{
CONTRACTL
{
INSTANCE_CHECK;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// Initial sizes for reflection (dynamic) modules, which have no metadata
// counts to size from up front.
enum
{
TYPEDEF_MAP_INITIAL_SIZE = 5,
TYPEREF_MAP_INITIAL_SIZE = 5,
MEMBERDEF_MAP_INITIAL_SIZE = 10,
GENERICPARAM_MAP_INITIAL_SIZE = 5,
GENERICTYPEDEF_MAP_INITIAL_SIZE = 5,
FILEREFERENCES_MAP_INITIAL_SIZE = 5,
ASSEMBLYREFERENCES_MAP_INITIAL_SIZE = 5,
};
PTR_TADDR pTable = NULL;
// Resource modules have no metadata tokens, hence no maps.
if (IsResource())
return;
if (IsReflection())
{
// For dynamic modules, it is essential that we at least have a TypeDefToMethodTable
// map with an initial block. Otherwise, all the iterators will abort on an
// initial empty table and we will e.g. corrupt the backpatching chains during
// an appdomain unload.
m_TypeDefToMethodTableMap.dwCount = TYPEDEF_MAP_INITIAL_SIZE;
// The above is essential. The following ones are precautionary.
m_TypeRefToMethodTableMap.dwCount = TYPEREF_MAP_INITIAL_SIZE;
m_MethodDefToDescMap.dwCount = MEMBERDEF_MAP_INITIAL_SIZE;
m_FieldDefToDescMap.dwCount = MEMBERDEF_MAP_INITIAL_SIZE;
m_GenericParamToDescMap.dwCount = GENERICPARAM_MAP_INITIAL_SIZE;
m_GenericTypeDefToCanonMethodTableMap.dwCount = TYPEDEF_MAP_INITIAL_SIZE;
m_FileReferencesMap.dwCount = FILEREFERENCES_MAP_INITIAL_SIZE;
m_ManifestModuleReferencesMap.dwCount = ASSEMBLYREFERENCES_MAP_INITIAL_SIZE;
m_MethodDefToPropertyInfoMap.dwCount = MEMBERDEF_MAP_INITIAL_SIZE;
}
else
{
// Static modules: size each map from the metadata token counts
// (+1 because RIDs are 1-based; the extra entry covers RID == count).
IMDInternalImport * pImport = GetMDImport();
// Get # TypeDefs (add 1 for COR_GLOBAL_PARENT_TOKEN)
m_TypeDefToMethodTableMap.dwCount = pImport->GetCountWithTokenKind(mdtTypeDef)+2;
// Get # TypeRefs
m_TypeRefToMethodTableMap.dwCount = pImport->GetCountWithTokenKind(mdtTypeRef)+1;
// Get # MethodDefs
m_MethodDefToDescMap.dwCount = pImport->GetCountWithTokenKind(mdtMethodDef)+1;
// Get # FieldDefs
m_FieldDefToDescMap.dwCount = pImport->GetCountWithTokenKind(mdtFieldDef)+1;
// Get # GenericParams
m_GenericParamToDescMap.dwCount = pImport->GetCountWithTokenKind(mdtGenericParam)+1;
// Get the number of FileReferences in the map
m_FileReferencesMap.dwCount = pImport->GetCountWithTokenKind(mdtFile)+1;
// Get the number of AssemblyReferences in the map
m_ManifestModuleReferencesMap.dwCount = pImport->GetCountWithTokenKind(mdtAssemblyRef)+1;
// These maps are only added to during NGen, so for other scenarios leave them empty
if (IsCompilationProcess())
{
m_GenericTypeDefToCanonMethodTableMap.dwCount = m_TypeDefToMethodTableMap.dwCount;
m_MethodDefToPropertyInfoMap.dwCount = m_MethodDefToDescMap.dwCount;
}
else
{
m_GenericTypeDefToCanonMethodTableMap.dwCount = 0;
m_MethodDefToPropertyInfoMap.dwCount = 0;
}
}
// Sum the entry counts with overflow-checked arithmetic (S_SIZE_T is the
// safe-int type; presumably it default-constructs to zero — TODO confirm),
// then carve all nine maps out of a single loader-heap allocation below.
S_SIZE_T nTotal;
nTotal += m_TypeDefToMethodTableMap.dwCount;
nTotal += m_TypeRefToMethodTableMap.dwCount;
nTotal += m_MethodDefToDescMap.dwCount;
nTotal += m_FieldDefToDescMap.dwCount;
nTotal += m_GenericParamToDescMap.dwCount;
nTotal += m_GenericTypeDefToCanonMethodTableMap.dwCount;
nTotal += m_FileReferencesMap.dwCount;
nTotal += m_ManifestModuleReferencesMap.dwCount;
nTotal += m_MethodDefToPropertyInfoMap.dwCount;
_ASSERTE (m_pAssembly && m_pAssembly->GetLowFrequencyHeap());
pTable = (PTR_TADDR)(void*)m_pAssembly->GetLowFrequencyHeap()->AllocMem(nTotal * S_SIZE_T(sizeof(TADDR)));
// Note: Memory allocated on loader heap is zero filled
// memset(pTable, 0, nTotal * sizeof(void*));
// Each map's pTable points at its slice of the single allocation; the order
// of the assignments below must match the order of the additions above.
m_TypeDefToMethodTableMap.pNext = NULL;
m_TypeDefToMethodTableMap.supportedFlags = TYPE_DEF_MAP_ALL_FLAGS;
m_TypeDefToMethodTableMap.pTable = pTable;
m_TypeRefToMethodTableMap.pNext = NULL;
m_TypeRefToMethodTableMap.supportedFlags = TYPE_REF_MAP_ALL_FLAGS;
m_TypeRefToMethodTableMap.pTable = &pTable[m_TypeDefToMethodTableMap.dwCount];
m_MethodDefToDescMap.pNext = NULL;
m_MethodDefToDescMap.supportedFlags = METHOD_DEF_MAP_ALL_FLAGS;
m_MethodDefToDescMap.pTable = &m_TypeRefToMethodTableMap.pTable[m_TypeRefToMethodTableMap.dwCount];
m_FieldDefToDescMap.pNext = NULL;
m_FieldDefToDescMap.supportedFlags = FIELD_DEF_MAP_ALL_FLAGS;
m_FieldDefToDescMap.pTable = &m_MethodDefToDescMap.pTable[m_MethodDefToDescMap.dwCount];
m_GenericParamToDescMap.pNext = NULL;
m_GenericParamToDescMap.supportedFlags = GENERIC_PARAM_MAP_ALL_FLAGS;
m_GenericParamToDescMap.pTable = &m_FieldDefToDescMap.pTable[m_FieldDefToDescMap.dwCount];
m_GenericTypeDefToCanonMethodTableMap.pNext = NULL;
m_GenericTypeDefToCanonMethodTableMap.supportedFlags = GENERIC_TYPE_DEF_MAP_ALL_FLAGS;
m_GenericTypeDefToCanonMethodTableMap.pTable = &m_GenericParamToDescMap.pTable[m_GenericParamToDescMap.dwCount];
m_FileReferencesMap.pNext = NULL;
m_FileReferencesMap.supportedFlags = FILE_REF_MAP_ALL_FLAGS;
m_FileReferencesMap.pTable = &m_GenericTypeDefToCanonMethodTableMap.pTable[m_GenericTypeDefToCanonMethodTableMap.dwCount];
m_ManifestModuleReferencesMap.pNext = NULL;
m_ManifestModuleReferencesMap.supportedFlags = MANIFEST_MODULE_MAP_ALL_FLAGS;
m_ManifestModuleReferencesMap.pTable = &m_FileReferencesMap.pTable[m_FileReferencesMap.dwCount];
m_MethodDefToPropertyInfoMap.pNext = NULL;
m_MethodDefToPropertyInfoMap.supportedFlags = PROPERTY_INFO_MAP_ALL_FLAGS;
m_MethodDefToPropertyInfoMap.pTable = &m_ManifestModuleReferencesMap.pTable[m_ManifestModuleReferencesMap.dwCount];
}
//
// FreeClassTables frees the classes in the module
//
void Module::FreeClassTables()
{
CONTRACTL
{
INSTANCE_CHECK;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
// Idempotence guard: only free once.
if (m_dwTransientFlags & CLASSES_FREED)
return;
FastInterlockOr(&m_dwTransientFlags, CLASSES_FREED);
// disable ibc here because it can cause errors during the destruction of classes
IBCLoggingDisabler disableLogging;
#if _DEBUG
DebugLogRidMapOccupancy();
#endif
//
// Free the types filled out in the TypeDefToEEClass map
//
// Go through each linked block
LookupMap<PTR_MethodTable>::Iterator typeDefIter(&m_TypeDefToMethodTableMap);
while (typeDefIter.Next())
{
MethodTable * pMT = typeDefIter.GetElement();
// Skip unrestored types: their EEClass was never fully constructed.
if (pMT != NULL && pMT->IsRestored())
{
pMT->GetClass()->Destruct(pMT);
}
}
// Now do the same for constructed types (arrays and instantiated generic types)
if (IsTenured()) // If we're destructing because of an error during the module's creation, we'll play it safe and not touch this table as its memory is freed by a
{ // separate AllocMemTracker. Though you're supposed to destruct everything else before destructing the AllocMemTracker, this is an easy invariant to break so
// we'll play extra safe on this end.
if (m_pAvailableParamTypes != NULL)
{
EETypeHashTable::Iterator it(m_pAvailableParamTypes);
EETypeHashEntry *pEntry;
while (m_pAvailableParamTypes->FindNext(&it, &pEntry))
{
TypeHandle th = pEntry->GetTypeHandle();
if (!th.IsRestored())
continue;
#ifdef FEATURE_COMINTEROP
// Some MethodTables/TypeDescs have COM interop goo attached to them which must be released
if (!th.IsTypeDesc())
{
MethodTable *pMT = th.AsMethodTable();
if (pMT->HasCCWTemplate() && (!pMT->IsZapped() || pMT->GetZapModule() == this))
{
// code:MethodTable::GetComCallWrapperTemplate() may go through canonical methodtable indirection cell.
// The module load could be aborted before completing code:FILE_LOAD_EAGER_FIXUPS phase that's responsible
// for resolving pre-restored indirection cells, so we have to check for it here explicitly.
if (CORCOMPILE_IS_POINTER_TAGGED(pMT->GetCanonicalMethodTableFixup()))
continue;
ComCallWrapperTemplate *pTemplate = pMT->GetComCallWrapperTemplate();
if (pTemplate != NULL)
{
pTemplate->Release();
}
}
}
else if (th.IsArray())
{
ComCallWrapperTemplate *pTemplate = th.AsArray()->GetComCallWrapperTemplate();
if (pTemplate != NULL)
{
pTemplate->Release();
}
}
#endif // FEATURE_COMINTEROP
// We need to call destruct on instances of EEClass whose "canonical" dependent lives in this table
// There is nothing interesting to destruct on array EEClass
if (!th.IsTypeDesc())
{
MethodTable * pMT = th.AsMethodTable();
// Only the canonical MT owns the EEClass; destruct it exactly once,
// and only if this module owns it (unzapped, or zapped into us).
if (pMT->IsCanonicalMethodTable() && (!pMT->IsZapped() || pMT->GetZapModule() == this))
pMT->GetClass()->Destruct(pMT);
}
}
}
}
}
#endif // !DACCESS_COMPILE
// Return the ClassLoader for this module — it is owned by the containing assembly.
ClassLoader *Module::GetClassLoader()
{
    WRAPPER_NO_CONTRACT;
    SUPPORTS_DAC;

    Assembly * pAssembly = m_pAssembly;
    _ASSERTE(pAssembly != NULL);
    return pAssembly->GetLoader();
}
// Return the BaseDomain this module lives in — delegated to the containing assembly.
PTR_BaseDomain Module::GetDomain()
{
    WRAPPER_NO_CONTRACT;
    SUPPORTS_DAC;

    Assembly * pAssembly = m_pAssembly;
    _ASSERTE(pAssembly != NULL);
    return pAssembly->GetDomain();
}
#ifndef DACCESS_COMPILE
#ifndef CROSSGEN_COMPILE
// Begin unloading this module: notify the profiler, flush any pending IBC
// profile data, and mark the module as being unloaded.
void Module::StartUnload()
{
WRAPPER_NO_CONTRACT;
#ifdef PROFILING_SUPPORTED
{
BEGIN_PIN_PROFILER(CORProfilerTrackModuleLoads());
if (!IsBeingUnloaded())
{
// Profiler is causing some peripheral class loads. Probably this just needs
// to be turned into a Fault_not_fatal and moved to a specific place inside the profiler.
EX_TRY
{
GCX_PREEMP();
g_profControlBlock.pProfInterface->ModuleUnloadStarted((ModuleID) this);
}
EX_CATCH
{
// Swallow profiler exceptions: unload proceeds regardless.
}
EX_END_CATCH(SwallowAllExceptions);
}
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
#ifdef FEATURE_PREJIT
if (g_IBCLogger.InstrEnabled())
{
Thread * pThread = GetThread();
ThreadLocalIBCInfo* pInfo = pThread->GetIBCInfo();
// Acquire the Crst lock before creating the IBCLoggingDisabler object.
// Only one thread at a time can be processing an IBC logging event.
CrstHolder lock(g_IBCLogger.GetSync());
{
IBCLoggingDisabler disableLogging( pInfo ); // runs IBCLoggingDisabler::DisableLogging
// Write out the method profile data
/*hr=*/WriteMethodProfileDataLogFile(true);
}
}
#endif // FEATURE_PREJIT
SetBeingUnloaded();
}
#endif // CROSSGEN_COMPILE
// Release IL-related resources held by this module; currently this just
// releases the symbol reader (ISymUnmanagedReader) if one was created.
void Module::ReleaseILData(void)
{
WRAPPER_NO_CONTRACT;
ReleaseISymUnmanagedReader();
}
//---------------------------------------------------------------------------------------
//
// Simple wrapper around calling IsAfContentType_WindowsRuntime() against the flags
// returned from the PEAssembly's GetFlagsNoTrigger()
//
// Return Value:
// nonzero iff we successfully determined pModule is a WinMD. FALSE if pModule is not
// a WinMD, or we fail trying to find out.
//
// Returns nonzero iff we successfully determined this module is a WinMD;
// FALSE if it is not a WinMD, or if we fail reading the assembly flags.
// (Simple wrapper around IsAfContentType_WindowsRuntime() over the flags
// from the PEAssembly's GetFlagsNoTrigger().)
BOOL Module::IsWindowsRuntimeModule()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        CAN_TAKE_LOCK; // Accesses metadata directly, which takes locks
        MODE_ANY;
    }
    CONTRACTL_END;

    // (Removed an unused local 'fRet' — both paths below return directly.)
    DWORD dwFlags;

    // If we can't read the flags, conservatively report "not a WinMD".
    if (FAILED(GetAssembly()->GetManifestFile()->GetFlagsNoTrigger(&dwFlags)))
        return FALSE;

    return IsAfContentType_WindowsRuntime(dwFlags);
}
// Returns TRUE if this module is part of the version bubble of the current
// compilation (always TRUE outside of an NGen/crossgen compilation process).
BOOL Module::IsInCurrentVersionBubble()
{
LIMITED_METHOD_CONTRACT;
#ifdef FEATURE_NATIVE_IMAGE_GENERATION
if (!IsCompilationProcess())
return TRUE;
// The module being compiled is always part of the current version bubble
AppDomain * pAppDomain = GetAppDomain();
if (pAppDomain->IsCompilationDomain() && pAppDomain->ToCompilationDomain()->GetTargetModule() == this)
return TRUE;
// ReadyToRun images version only against themselves: every other module is
// outside the bubble.
if (IsReadyToRunCompilation())
return FALSE;
#ifdef FEATURE_COMINTEROP
// With WinMD-resilient NGen, WinMD dependencies are treated as outside the bubble.
if (g_fNGenWinMDResilient)
return !GetAssembly()->IsWinMD();
#endif
return TRUE;
#else // FEATURE_NATIVE_IMAGE_GENERATION
return TRUE;
#endif // FEATURE_NATIVE_IMAGE_GENERATION
}
//---------------------------------------------------------------------------------------
//
// WinMD-aware helper to grab a readable public metadata interface. Any place that thinks
// it wants to use Module::GetRWImporter + QI now should use this wrapper instead.
//
// Arguments:
// * dwOpenFlags - Combo from CorOpenFlags. Better not contain ofWrite!
// * riid - Public IID requested
// * ppvInterface - [out] Requested interface. On success, *ppvInterface is returned
// refcounted; caller responsible for Release.
//
// Return Value:
// HRESULT indicating success or failure.
//
// WinMD-aware helper to obtain a readable public metadata interface (see header comment
// above).  Note that ppvInterface may be NULL, in which case this only probes whether
// the metadata can be opened for the requested riid.
HRESULT Module::GetReadablePublicMetaDataInterface(DWORD dwOpenFlags, REFIID riid, LPVOID * ppvInterface)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;      // IsWindowsRuntimeModule accesses metadata directly, which takes locks
        MODE_ANY;
    }
    CONTRACTL_END;

    _ASSERTE((dwOpenFlags & ofWrite) == 0);

    // Temporary place to store public, AddRef'd interface pointers
    ReleaseHolder<IUnknown> pIUnkPublic;

    // Temporary place to store the IUnknown from which we'll do the final QI to get the
    // requested public interface.  Any assignment to pIUnk assumes pIUnk does not need
    // to do a Release() (either the interface was internal and not AddRef'd, or was
    // public and will be released by the above holder).
    IUnknown * pIUnk = NULL;

    HRESULT hr = S_OK;

    // Normally, we just get an RWImporter to do the QI on, and we're on our way.
    EX_TRY
    {
        pIUnk = GetRWImporter();
    }
    EX_CATCH_HRESULT_NO_ERRORINFO(hr);

    if (FAILED(hr) && IsWindowsRuntimeModule())
    {
        // WinMD modules don't like creating RW importers.  They also (currently)
        // have no plumbing to get to their public metadata interfaces from the
        // Module.  So we actually have to start from scratch at the dispenser.

        // To start with, get a dispenser, and get the metadata memory blob we've
        // already loaded.  If either of these fail, just return the error HRESULT
        // from the above GetRWImporter() call.

        // We'll get an addref'd IMetaDataDispenser, so use a holder to release it
        ReleaseHolder<IMetaDataDispenser> pDispenser;
        if (FAILED(InternalCreateMetaDataDispenser(IID_IMetaDataDispenser, &pDispenser)))
        {
            // Deliberately surface the original GetRWImporter() failure, not this one.
            _ASSERTE(FAILED(hr));
            return hr;
        }

        COUNT_T cbMetadata = 0;
        PTR_CVOID pvMetadata = GetAssembly()->GetManifestFile()->GetLoadedMetadata(&cbMetadata);
        if ((pvMetadata == NULL) || (cbMetadata == 0))
        {
            // Same here: report the original failure HRESULT.
            _ASSERTE(FAILED(hr));
            return hr;
        }

        // Now that the pieces are ready, we can use the riid specified by the
        // profiler in this call to the dispenser to get the requested interface. If
        // this fails, then this is the interesting HRESULT for the caller to see.
        //
        // We'll get an AddRef'd public interface, so use a holder to release it
        hr = pDispenser->OpenScopeOnMemory(
            pvMetadata,
            cbMetadata,
            (dwOpenFlags | ofReadOnly),  // Force ofReadOnly on behalf of the profiler
            riid,
            &pIUnkPublic);
        if (FAILED(hr))
            return hr;

        // Set pIUnk so we can do the final QI from it below as we do in the other
        // cases.
        pIUnk = pIUnkPublic;
    }

    // Get the requested interface (skipped when the caller only wants the probe result)
    if (SUCCEEDED(hr) && (ppvInterface != NULL))
    {
        _ASSERTE(pIUnk != NULL);
        hr = pIUnk->QueryInterface(riid, (void **) ppvInterface);
    }

    return hr;
}
// A special sentinel stored in m_pISymUnmanagedReader indicating that a reader could not
// be created - don't try again.  It is only ever compared against, never dereferenced.
static ISymUnmanagedReader* const k_pInvalidSymReader = (ISymUnmanagedReader*)0x1;
#if defined(FEATURE_ISYM_READER) && !defined(CROSSGEN_COMPILE)
// Non-throwing wrapper over GetISymUnmanagedReader.  Non-terminal exceptions raised
// while creating the reader (expected to be OOM only) are swallowed and NULL is
// returned instead; terminal exceptions are rethrown by EX_END_CATCH.
ISymUnmanagedReader *Module::GetISymUnmanagedReaderNoThrow(void)
{
    CONTRACT(ISymUnmanagedReader *)
    {
        INSTANCE_CHECK;
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        NOTHROW;
        WRAPPER(GC_TRIGGERS);
        MODE_ANY;
    }
    CONTRACT_END;

    ISymUnmanagedReader *ret = NULL;

    EX_TRY
    {
        ret = GetISymUnmanagedReader();
    }
    EX_CATCH
    {
        // We swallow any exception and say that we simply couldn't get a reader by returning NULL.
        // The only type of error that should be possible here is OOM.
        /* DISABLED due to Dev10 bug 619495
        CONSISTENCY_CHECK_MSG(
            GET_EXCEPTION()->GetHR() == E_OUTOFMEMORY,
            "Exception from GetISymUnmanagedReader");
        */
    }
    EX_END_CATCH(RethrowTerminalExceptions);

    RETURN (ret);
}
// Returns the symbol reader for this module, creating it on first use and caching it in
// m_pISymUnmanagedReader.  On success the cached reader is returned with an extra
// AddRef that the caller must Release.  Returns NULL if symbols are unavailable; a
// failed creation attempt is remembered via the k_pInvalidSymReader sentinel so it is
// not retried.  May throw (e.g. OOM); see GetISymUnmanagedReaderNoThrow.
ISymUnmanagedReader *Module::GetISymUnmanagedReader(void)
{
    CONTRACT(ISymUnmanagedReader *)
    {
        INSTANCE_CHECK;
        POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
        THROWS;
        WRAPPER(GC_TRIGGERS);
        MODE_ANY;
    }
    CONTRACT_END;

    // No symbols for resource modules
    if (IsResource())
        RETURN NULL;

    // Don't bother creating readers during EE shutdown.
    if (g_fEEShutDown)
        RETURN NULL;

    // Verify that symbol reading is permitted for this module.
    // If we know we've already created a symbol reader, don't bother checking.  There is
    // no advantage to allowing symbol reading to be turned off if we've already created the reader.
    // Note that we can't just put this code in the creation block below because we might have to
    // call managed code to resolve security policy, and we can't do that while holding a lock.
    // There is no disadvantage other than a minor perf cost to calling this unnecessarily, so the
    // race on m_pISymUnmanagedReader here is OK.  The perf cost is minor because the only real
    // work is done by the security system which caches the result.
    if( m_pISymUnmanagedReader == NULL && !IsSymbolReadingEnabled() )
        RETURN NULL;

    // Take the lock for the m_pISymUnmanagedReader
    // This ensures that we'll only ever attempt to create one reader at a time, and we won't
    // create a reader if we're in the middle of destroying one that has become stale.
    // Actual access to the reader can safely occur outside the lock as long as it has its own
    // AddRef which we take inside the lock at the bottom of this method.
    CrstHolder holder(&m_ISymUnmanagedReaderCrst);

    UINT lastErrorMode = 0;

    // If we haven't created a reader yet, do so now
    if (m_pISymUnmanagedReader == NULL)
    {
        // Mark our reader as invalid so that if we fail to create the reader
        // (including if an exception is thrown), we won't keep trying.
        m_pISymUnmanagedReader = k_pInvalidSymReader;

        // There are 4 main cases here:
        //  1. Assembly is on disk and we'll get the symbols from a file next to the assembly
        //  2. Assembly is provided by the host and we'll get the symbols from the host
        //  3. Assembly was loaded in-memory (by byte array or ref-emit), and symbols were
        //     provided along with it.
        //  4. Assembly was loaded in-memory but no symbols were provided.

        // Determine whether we should be looking in memory for the symbols (cases 2 & 3)
        bool fInMemorySymbols = ( m_file->IsIStream() || GetInMemorySymbolStream() );
        if( !fInMemorySymbols && m_file->GetPath().IsEmpty() )
        {
            // Case 4.  We don't have a module path, an IStream or an in memory symbol stream,
            // so there is no-where to try and get symbols from.
            RETURN (NULL);
        }

        // Create a binder to find the reader.
        //
        // <REVISIT_TODO>@perf: this is slow, creating and destroying the binder every
        // time. We should cache this somewhere, but I'm not 100% sure
        // where right now...</REVISIT_TODO>
        HRESULT hr = S_OK;

        SafeComHolder<ISymUnmanagedBinder> pBinder;

        if (g_pDebugInterface == NULL)
        {
            // @TODO: this is reachable when debugging!
            UNREACHABLE_MSG("About to CoCreateInstance! This code should not be "
                "reachable or needs to be reimplemented for CoreCLR!");
        }

        if (this->GetInMemorySymbolStreamFormat() == eSymbolFormatILDB)
        {
            // We've got in-memory ILDB symbols, create the ILDB symbol binder
            // Note that in this case, we must be very careful not to use diasymreader.dll
            // at all - we don't trust it, and shouldn't run any code in it
            IfFailThrow(IldbSymbolsCreateInstance(CLSID_CorSymBinder_SxS, IID_ISymUnmanagedBinder, (void**)&pBinder));
        }
        else
        {
            // We're going to be working with Windows PDB format symbols. Attempt to CoCreate the symbol binder.
            // CoreCLR supports not having a symbol reader installed, so CoCreate searches the PATH env var
            // and then tries coreclr dll location.
            // On desktop, the framework installer is supposed to install diasymreader.dll as well
            // and so this shouldn't happen.
            hr = FakeCoCreateInstanceEx(CLSID_CorSymBinder_SxS, NATIVE_SYMBOL_READER_DLL, IID_ISymUnmanagedBinder, (void**)&pBinder, NULL);
            if (FAILED(hr))
            {
                // Fall back to looking for the native symbol reader next to the runtime module.
                PathString symbolReaderPath;
                hr = GetHModuleDirectory(GetModuleInst(), symbolReaderPath);
                if (FAILED(hr))
                {
                    RETURN (NULL);
                }
                symbolReaderPath.Append(NATIVE_SYMBOL_READER_DLL);
                hr = FakeCoCreateInstanceEx(CLSID_CorSymBinder_SxS, symbolReaderPath.GetUnicode(), IID_ISymUnmanagedBinder, (void**)&pBinder, NULL);
                if (FAILED(hr))
                {
                    RETURN (NULL);
                }
            }
        }

        LOG((LF_CORDB, LL_INFO10, "M::GISUR: Created binder\n"));

        // Note: we change the error mode here so we don't get any popups as the PDB symbol reader attempts to search the
        // hard disk for files.
        lastErrorMode = SetErrorMode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);

        SafeComHolder<ISymUnmanagedReader> pReader;

        if (fInMemorySymbols)
        {
            SafeComHolder<IStream> pIStream( NULL );

            // If debug stream is already specified, don't bother to go through fusion
            // This is the common case for case 2 (hosted modules) and case 3 (Ref.Emit).
            if (GetInMemorySymbolStream() )
            {
                if( IsReflection() )
                {
                    // If this is Reflection.Emit, we must clone the stream because another thread may
                    // update it when someone is using the reader we create here leading to AVs.
                    // Note that the symbol stream should be up to date since we flush the writer
                    // after every addition in Module::AddClass.
                    IfFailThrow(GetInMemorySymbolStream()->Clone(&pIStream));
                }
                else
                {
                    // The stream is not changing. Just add-ref to it.
                    pIStream = GetInMemorySymbolStream();
                    pIStream->AddRef();
                }
            }
            if (SUCCEEDED(hr))
            {
                hr = pBinder->GetReaderFromStream(GetRWImporter(), pIStream, &pReader);
            }
        }
        else
        {
            // The assembly is on disk, so try and load symbols based on the path to the assembly (case 1)
            const SString &path = m_file->GetPath();

            // Call Fusion to ensure that any PDB's are shadow copied before
            // trying to get a symbol reader. This has to be done once per
            // Assembly.
            // for this to work with winmds we cannot simply call GetRWImporter() as winmds are RO
            // and thus don't implement the RW interface. so we call this wrapper function which knows
            // how to get a IMetaDataImport interface regardless of the underlying module type.
            ReleaseHolder<IUnknown> pUnk = NULL;
            hr = GetReadablePublicMetaDataInterface(ofReadOnly, IID_IMetaDataImport, &pUnk);
            if (SUCCEEDED(hr))
                hr = pBinder->GetReaderForFile(pUnk, path, NULL, &pReader);
        }

        SetErrorMode(lastErrorMode);

        if (SUCCEEDED(hr))
        {
            // Transfer ownership from the holder into the cache slot.
            m_pISymUnmanagedReader = pReader.Extract();
            LOG((LF_CORDB, LL_INFO10, "M::GISUR: Loaded symbols for module %S\n", GetDebugName()));
        }
        else
        {
            // We failed to create the reader, don't try again next time
            LOG((LF_CORDB, LL_INFO10, "M::GISUR: Failed to load symbols for module %S\n", GetDebugName()));
            _ASSERTE( m_pISymUnmanagedReader == k_pInvalidSymReader );
        }

    } // if( m_pISymUnmanagedReader == NULL )

    // If we previously failed to create the reader, return NULL
    if (m_pISymUnmanagedReader == k_pInvalidSymReader)
    {
        RETURN (NULL);
    }

    // Success - return an AddRef'd copy of the reader
    m_pISymUnmanagedReader->AddRef();
    RETURN (m_pISymUnmanagedReader);
}
#endif // FEATURE_ISYM_READER && !CROSSGEN_COMPILE
// Returns TRUE if we are permitted to create and use a symbol reader for this module.
BOOL Module::IsSymbolReadingEnabled()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    // The only time we need symbols available is for debugging and taking stack traces,
    // neither of which can be done if the assembly can't run. The advantage of being strict
    // is that there is a perf penalty adding types to a module if you must support reading
    // symbols at any time. If symbols don't need to be accessible then we can
    // optimize by only committing symbols when the assembly is saved to disk. See DDB 671107.
    if(!GetAssembly()->HasRunAccess())
    {
        return FALSE;
    }

    // If the module has symbols in-memory (eg. RefEmit) that are in ILDB
    // format, then there isn't any reason not to supply them. The reader
    // code is always available, and we trust its security.
    if (this->GetInMemorySymbolStreamFormat() == eSymbolFormatILDB)
    {
        return TRUE;
    }

#ifdef DEBUGGING_SUPPORTED
    if (!g_pDebugInterface)
    {
        // if debugging is disabled (no debug pack installed), do not load symbols
        // This is done for two reasons.  We don't completely trust the security of
        // the diasymreader.dll code, so we don't want to use it in mainline scenarios.
        // Secondly, there's no reason that diasymreader.dll will even necessarily
        // be on the machine if the debug pack isn't installed.
        return FALSE;
    }
#endif // DEBUGGING_SUPPORTED

    return TRUE;
}
// At this point, this is only called when we're creating an appdomain
// out of an array of bytes, so we'll keep the IStream that we create
// around in case the debugger attaches later (including detach & re-attach!)
void Module::SetSymbolBytes(LPCBYTE pbSyms, DWORD cbSyms)
{
STANDARD_VM_CONTRACT;
// Create a IStream from the memory for the syms.
SafeComHolder<CGrowableStream> pStream(new CGrowableStream());
// Do not need to AddRef the CGrowableStream because the constructor set it to 1
// ref count already. The Module will keep a copy for its own use.
// Make sure to set the symbol stream on the module before
// attempting to send UpdateModuleSyms messages up for it.
SetInMemorySymbolStream(pStream, eSymbolFormatPDB);
// This can only be called when the module is being created. No-one should have
// tried to use the symbols yet, and so there should not be a reader.
// If instead, we wanted to call this when a reader could have been created, we need to
// serialize access by taking the reader lock, and flush the old reader by calling
// code:Module.ReleaseISymUnmanagedReader
_ASSERTE( m_pISymUnmanagedReader == NULL );
#ifdef LOGGING
LPCWSTR pName = NULL;
pName = GetDebugName();
#endif // LOGGING
ULONG cbWritten;
DWORD dwError = pStream->Write((const void *)pbSyms,
(ULONG)cbSyms,
&cbWritten);