diff --git a/src/coreclr/debug/daccess/fntableaccess.cpp b/src/coreclr/debug/daccess/fntableaccess.cpp
index 82867035be533..c40592d958584 100644
--- a/src/coreclr/debug/daccess/fntableaccess.cpp
+++ b/src/coreclr/debug/daccess/fntableaccess.cpp
@@ -171,7 +171,7 @@ static NTSTATUS OutOfProcessFunctionTableCallback_JIT(IN ReadMemoryFunction
     move(Hp, pHp);
 
-    if (pHp == MinAddress)
+    if (Hp.GetModuleBase() == MinAddress)
     {
         DWORD_PTR pThisHeader;
         DWORD_PTR hdrOffset;
diff --git a/src/coreclr/debug/daccess/fntableaccess.h b/src/coreclr/debug/daccess/fntableaccess.h
index de84acfc61239..690056d005a1e 100644
--- a/src/coreclr/debug/daccess/fntableaccess.h
+++ b/src/coreclr/debug/daccess/fntableaccess.h
@@ -41,6 +41,18 @@ struct FakeHeapList
     DWORD_PTR           pHdrMap;        // changed from DWORD*
     size_t              maxCodeHeapSize;
     size_t              reserveForJumpStubs;
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+    DWORD_PTR           CLRPersonalityRoutine;
+#endif
+
+    DWORD_PTR GetModuleBase()
+    {
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+        return CLRPersonalityRoutine;
+#else
+        return mapBase;
+#endif
+    }
 };
 
 typedef struct _FakeHpRealCodeHdr
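Note on the two hunks above: FakeHeapList is the debugger-side (DAC) mirror of the runtime's HeapList (see the codeman.h hunks further down), so it has to pick up both the new CLRPersonalityRoutine field and the same GetModuleBase() selection logic, or out-of-process function-table lookup would compute offsets from the wrong base. A standalone sketch of that dispatch, with __x86_64__/__aarch64__ standing in for TARGET_AMD64/TARGET_ARM64 and simplified types that are not the runtime's:

    #include <cstdint>
    #include <cstdio>

    using DWORD_PTR = std::uintptr_t;

    struct MirrorHeapList
    {
        DWORD_PTR mapBase;                // base of the nibble-map range
    #if defined(__x86_64__) || defined(__aarch64__)
        DWORD_PTR CLRPersonalityRoutine;  // thunk address doubles as the module base
    #endif

        DWORD_PTR GetModuleBase() const
        {
    #if defined(__x86_64__) || defined(__aarch64__)
            return CLRPersonalityRoutine; // unwind offsets are relative to the thunk
    #else
            return mapBase;
    #endif
        }
    };

    int main()
    {
        MirrorHeapList h = {};
        h.mapBase = 0x10000;
    #if defined(__x86_64__) || defined(__aarch64__)
        h.CLRPersonalityRoutine = 0x20000;
    #endif
        std::printf("module base = %#llx\n", (unsigned long long)h.GetModuleBase());
        return 0;
    }

The comparison change in fntableaccess.cpp is the same idea: the callback used to treat the HeapList address itself as the module base, and now asks GetModuleBase() for it.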
diff --git a/src/coreclr/debug/daccess/request.cpp b/src/coreclr/debug/daccess/request.cpp
index 6600f80ba2aca..31638507e9430 100644
--- a/src/coreclr/debug/daccess/request.cpp
+++ b/src/coreclr/debug/daccess/request.cpp
@@ -3481,7 +3481,7 @@ ClrDataAccess::TraverseLoaderHeap(CLRDATA_ADDRESS loaderHeapAddr, VISITHEAP pFun
         TADDR addr = PTR_TO_TADDR(block->pVirtualAddress);
         size_t size = block->dwVirtualSize;
 
-        BOOL bCurrentBlock = (block == pLoaderHeap->m_pCurBlock);
+        BOOL bCurrentBlock = (block == pLoaderHeap->m_pFirstBlock);
 
         pFunc(addr,size,bCurrentBlock);
 
@@ -3538,7 +3538,7 @@ ClrDataAccess::TraverseVirtCallStubHeap(CLRDATA_ADDRESS pAppDomain, VCSHeapType
         TADDR addr = PTR_TO_TADDR(block->pVirtualAddress);
         size_t size = block->dwVirtualSize;
 
-        BOOL bCurrentBlock = (block == pLoaderHeap->m_pCurBlock);
+        BOOL bCurrentBlock = (block == pLoaderHeap->m_pFirstBlock);
 
         pFunc(addr, size, bCurrentBlock);
         block = block->pNext;
diff --git a/src/coreclr/inc/loaderheap.h b/src/coreclr/inc/loaderheap.h
index 68d50f6d637d3..96fec35be0ce4 100644
--- a/src/coreclr/inc/loaderheap.h
+++ b/src/coreclr/inc/loaderheap.h
@@ -201,8 +201,6 @@ class UnlockedLoaderHeap
     PTR_BYTE            m_pPtrToEndOfCommittedRegion;
     PTR_BYTE            m_pEndReservedRegion;
 
-    PTR_LoaderHeapBlock m_pCurBlock;
-
     // When we need to ClrVirtualAlloc() MEM_RESERVE a new set of pages, number of bytes to reserve
     DWORD               m_dwReserveBlockSize;
 
@@ -301,6 +299,12 @@ class UnlockedLoaderHeap
         return m_pEndReservedRegion - m_pAllocPtr;
     }
 
+    PTR_BYTE UnlockedGetAllocPtr()
+    {
+        LIMITED_METHOD_CONTRACT;
+        return m_pAllocPtr;
+    }
+
 private:
     // Get some more committed pages - either commit some more in the current reserved region, or, if it
     // has run out, reserve another set of pages
@@ -848,6 +852,18 @@ class ExplicitControlLoaderHeap : public UnlockedLoaderHeap
         WRAPPER_NO_CONTRACT;
         return UnlockedGetReservedBytesFree();
     }
+
+    PTR_BYTE GetAllocPtr()
+    {
+        WRAPPER_NO_CONTRACT;
+        return UnlockedGetAllocPtr();
+    }
+
+    void ReservePages(size_t size)
+    {
+        WRAPPER_NO_CONTRACT;
+        UnlockedReservePages(size);
+    }
 };
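Note: with m_pCurBlock gone, UnlockedReservePages (next file) prepends each newly reserved block to m_pFirstBlock, so the head of the list is always the block that allocations are currently carved from; that is why the two DAC traversals above now compare against m_pFirstBlock. A rough sketch of the new list discipline, with simplified stand-in types:

    #include <cstddef>

    struct Block
    {
        void*  pVirtualAddress;
        size_t dwVirtualSize;
        Block* pNext;
    };

    struct Heap
    {
        Block* pFirstBlock = nullptr;

        // O(1) prepend replaces the old walk-to-the-tail append, so the
        // head is always the newest block, i.e. the "current" one.
        void AddBlock(Block* b)
        {
            b->pNext    = pFirstBlock;
            pFirstBlock = b;
        }
    };

    int main()
    {
        Block a = {nullptr, 0x1000, nullptr};
        Block b = {nullptr, 0x2000, nullptr};
        Heap h;
        h.AddBlock(&a);
        h.AddBlock(&b);   // b becomes the head, and thus the current block
        return h.pFirstBlock == &b ? 0 : 1;
    }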
diff --git a/src/coreclr/utilcode/loaderheap.cpp b/src/coreclr/utilcode/loaderheap.cpp
index 8cfbba756592c..33974b9e29076 100644
--- a/src/coreclr/utilcode/loaderheap.cpp
+++ b/src/coreclr/utilcode/loaderheap.cpp
@@ -910,7 +910,6 @@ UnlockedLoaderHeap::UnlockedLoaderHeap(DWORD dwReserveBlockSize,
     }
     CONTRACTL_END;
 
-    m_pCurBlock                  = NULL;
     m_pFirstBlock                = NULL;
 
     m_dwReserveBlockSize         = dwReserveBlockSize;
@@ -982,6 +981,8 @@ UnlockedLoaderHeap::~UnlockedLoaderHeap()
             fSuccess = ClrVirtualFree(pVirtualAddress, 0, MEM_RELEASE);
             _ASSERTE(fSuccess);
         }
+
+        delete pSearch;
     }
 
     if (m_reservedBlock.m_fReleaseMemory)
@@ -1047,12 +1048,22 @@ size_t UnlockedLoaderHeap::GetBytesAvailReservedRegion()
 
 #define SETUP_NEW_BLOCK(pData, dwSizeToCommit, dwSizeToReserve)                    \
         m_pPtrToEndOfCommittedRegion = (BYTE *) (pData) + (dwSizeToCommit);        \
-        m_pAllocPtr                  = (BYTE *) (pData) + sizeof(LoaderHeapBlock); \
+        m_pAllocPtr                  = (BYTE *) (pData);                           \
         m_pEndReservedRegion         = (BYTE *) (pData) + (dwSizeToReserve);
 
 #ifndef DACCESS_COMPILE
 
+void ReleaseReservedMemory(BYTE* value)
+{
+    if (value)
+    {
+        ClrVirtualFree(value, 0, MEM_RELEASE);
+    }
+}
+
+using ReservedMemoryHolder = SpecializedWrapper<BYTE, ReleaseReservedMemory>;
+
 BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
 {
     CONTRACTL
@@ -1065,13 +1076,10 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
 
     size_t dwSizeToReserve;
 
-    // Add sizeof(LoaderHeapBlock)
-    dwSizeToCommit += sizeof(LoaderHeapBlock);
-
     // Round to page size again
     dwSizeToCommit = ALIGN_UP(dwSizeToCommit, GetOsPageSize());
 
-    void *pData = NULL;
+    ReservedMemoryHolder pData = NULL;
     BOOL fReleaseMemory = TRUE;
 
     // We were provided with a reserved memory block at instance creation time, so use it if it's big enough.
@@ -1079,7 +1087,7 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
         m_reservedBlock.dwVirtualSize >= dwSizeToCommit)
     {
         // Get the info out of the block.
-        pData = m_reservedBlock.pVirtualAddress;
+        pData = (PTR_BYTE)m_reservedBlock.pVirtualAddress;
         dwSizeToReserve = m_reservedBlock.dwVirtualSize;
         fReleaseMemory = m_reservedBlock.m_fReleaseMemory;
 
@@ -1126,16 +1134,17 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
         return FALSE;
     }
 
+    if (!fReleaseMemory)
+    {
+        pData.SuppressRelease();
+    }
+
    // Commit first set of pages, since it will contain the LoaderHeapBlock
    void *pTemp = ClrVirtualAlloc(pData, dwSizeToCommit, MEM_COMMIT, (m_Options & LHF_EXECUTABLE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
    if (pTemp == NULL)
    {
        //_ASSERTE(!"Unable to ClrVirtualAlloc commit in a loaderheap");

-        // Unable to commit - release pages
-        if (fReleaseMemory)
-            ClrVirtualFree(pData, 0, MEM_RELEASE);
-
        return FALSE;
    }

@@ -1147,44 +1156,27 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
                           ((const BYTE *) pData) + dwSizeToReserve,
                           (void *) this))
        {
-
-            if (fReleaseMemory)
-                ClrVirtualFree(pData, 0, MEM_RELEASE);
-
            return FALSE;
        }
    }

-    m_dwTotalAlloc += dwSizeToCommit;
-
-    LoaderHeapBlock *pNewBlock;
+    LoaderHeapBlock *pNewBlock = new (nothrow) LoaderHeapBlock;
+    if (pNewBlock == NULL)
+    {
+        return FALSE;
+    }

-#if defined(HOST_OSX) && defined(HOST_ARM64)
-    // Always assume we are touching executable heap
-    auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
-#endif // defined(HOST_OSX) && defined(HOST_ARM64)
+    m_dwTotalAlloc += dwSizeToCommit;

-    pNewBlock = (LoaderHeapBlock *) pData;
+    pData.SuppressRelease();

     pNewBlock->dwVirtualSize    = dwSizeToReserve;
     pNewBlock->pVirtualAddress  = pData;
-    pNewBlock->pNext            = NULL;
+    pNewBlock->pNext            = m_pFirstBlock;
     pNewBlock->m_fReleaseMemory = fReleaseMemory;

-    LoaderHeapBlock *pCurBlock = m_pCurBlock;
-
-    // Add to linked list
-    while (pCurBlock != NULL &&
-           pCurBlock->pNext != NULL)
-        pCurBlock = pCurBlock->pNext;
-
-    if (pCurBlock != NULL)
-        m_pCurBlock->pNext = pNewBlock;
-    else
-        m_pFirstBlock = pNewBlock;
-
-    // If we want to use the memory immediately...
-    m_pCurBlock = pNewBlock;
+    // Add to the linked list
+    m_pFirstBlock = pNewBlock;

     SETUP_NEW_BLOCK(pData, dwSizeToCommit, dwSizeToReserve);
@@ -1827,16 +1819,11 @@ void UnlockedLoaderHeap::DumpFreeList()
         size_t dwsize = pBlock->m_dwSize;
         BOOL ccbad = FALSE;
         BOOL sizeunaligned = FALSE;
-        BOOL sizesmall = FALSE;
 
         if ( 0 != (dwsize & ALLOC_ALIGN_CONSTANT) )
         {
             sizeunaligned = TRUE;
         }
-        if ( dwsize < sizeof(LoaderHeapBlock))
-        {
-            sizesmall = TRUE;
-        }
 
         for (size_t i = sizeof(LoaderHeapFreeBlock); i < dwsize; i++)
         {
@@ -1850,7 +1837,6 @@ void UnlockedLoaderHeap::DumpFreeList()
         printf("Addr = %pxh, Size = %lxh", pBlock, ((ULONG)dwsize));
         if (ccbad) printf(" *** ERROR: NOT CC'd ***");
         if (sizeunaligned) printf(" *** ERROR: size not a multiple of ALLOC_ALIGN_CONSTANT ***");
-        if (sizesmall) printf(" *** ERROR: size smaller than sizeof(LoaderHeapFreeBlock) ***");
         printf("\n");
 
         pBlock = pBlock->m_pNext;
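Note: ReservedMemoryHolder above replaces three hand-written ClrVirtualFree calls on the failure paths with a single holder that releases the reservation on any early return; SuppressRelease() is called once ownership is transferred to the block list (or when the heap never owned the memory, the !fReleaseMemory case). A minimal sketch of the idiom; the runtime's actual Wrapper/SpecializedWrapper machinery is richer, and FreeBytes here is a stand-in release function:

    #include <cstdlib>

    // The release function must tolerate null, as ReleaseReservedMemory does.
    static void FreeBytes(unsigned char* p)
    {
        std::free(p);
    }

    template <typename T, void (*RELEASE)(T*)>
    class ScopedMem
    {
        T*   m_value    = nullptr;
        bool m_suppress = false;

    public:
        ScopedMem(T* value) : m_value(value) {}
        ~ScopedMem() { if (!m_suppress) RELEASE(m_value); }

        ScopedMem(const ScopedMem&)            = delete;
        ScopedMem& operator=(const ScopedMem&) = delete;

        ScopedMem& operator=(T* value) { m_value = value; return *this; }
        operator T*() const { return m_value; }

        // Call once a longer-lived owner has taken responsibility.
        void SuppressRelease() { m_suppress = true; }
    };

    int main()
    {
        ScopedMem<unsigned char, FreeBytes> mem(
            static_cast<unsigned char*>(std::malloc(64)));
        if (mem == nullptr)
            return 1;          // early return: the destructor frees for us
        mem.SuppressRelease(); // pretend ownership moved to a block list
        FreeBytes(mem);        // the new owner releases it later
        return 0;
    }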
diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp
index f1cc4b674cf9d..e47b3206761d4 100644
--- a/src/coreclr/vm/codeman.cpp
+++ b/src/coreclr/vm/codeman.cpp
@@ -1838,11 +1838,29 @@ CodeFragmentHeap::CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKi
 
 void CodeFragmentHeap::AddBlock(VOID * pMem, size_t dwSize)
 {
-    LIMITED_METHOD_CONTRACT;
-    FreeBlock * pBlock = (FreeBlock *)pMem;
-    pBlock->m_pNext = m_pFreeBlocks;
-    pBlock->m_dwSize = dwSize;
-    m_pFreeBlocks = pBlock;
+    CONTRACTL
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    // Failure of the "nothrow" new below is handled in a non-fault way, so
+    // make sure that callers with FORBID_FAULT can call this method without
+    // firing the contract violation assert.
+    PERMANENT_CONTRACT_VIOLATION(FaultViolation, ReasonContractInfrastructure);
+
+    FreeBlock * pBlock = new (nothrow) FreeBlock;
+    // In the OOM case we don't add the block to the list of free blocks
+    // as we are in a FORBID_FAULT code path.
+    if (pBlock != NULL)
+    {
+        pBlock->m_pNext = m_pFreeBlocks;
+        pBlock->m_pBlock = pMem;
+        pBlock->m_dwSize = dwSize;
+        m_pFreeBlocks = pBlock;
+    }
 }
 
 void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock)
@@ -1850,7 +1868,18 @@ void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock)
     LIMITED_METHOD_CONTRACT;
     FreeBlock * pBlock = *ppBlock;
     *ppBlock = pBlock->m_pNext;
-    ZeroMemory(pBlock, sizeof(FreeBlock));
+    delete pBlock;
+}
+
+CodeFragmentHeap::~CodeFragmentHeap()
+{
+    FreeBlock* pBlock = m_pFreeBlocks;
+    while (pBlock != NULL)
+    {
+        FreeBlock *pNextBlock = pBlock->m_pNext;
+        delete pBlock;
+        pBlock = pNextBlock;
+    }
 }
 
 TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
@@ -1869,9 +1898,6 @@ TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
 
     dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR));
 
-    if (dwRequestedSize < sizeof(FreeBlock))
-        dwRequestedSize = sizeof(FreeBlock);
-
     // We will try to batch up allocation of small blocks into one large allocation
 #define SMALL_BLOCK_THRESHOLD 0x100
     SIZE_T nFreeSmallBlocks = 0;
 
@@ -1881,7 +1907,7 @@ TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
     while (*ppFreeBlock != NULL)
     {
         FreeBlock * pFreeBlock = *ppFreeBlock;
-        if (((BYTE *)pFreeBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
+        if (((BYTE *)pFreeBlock->m_pBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock->m_pBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
         {
             if (ppBestFit == NULL || pFreeBlock->m_dwSize < (*ppBestFit)->m_dwSize)
                 ppBestFit = ppFreeBlock;
@@ -1898,7 +1924,7 @@ TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
     SIZE_T dwSize;
     if (ppBestFit != NULL)
     {
-        pMem = *ppBestFit;
+        pMem = (*ppBestFit)->m_pBlock;
         dwSize = (*ppBestFit)->m_dwSize;
 
         RemoveBlock(ppBestFit);
@@ -1946,8 +1972,6 @@ void CodeFragmentHeap::RealBackoutMem(void *pMem
 {
     CrstHolder ch(&m_CritSec);
 
-    _ASSERTE(dwSize >= sizeof(FreeBlock));
-
 #if defined(HOST_OSX) && defined(HOST_ARM64)
     auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
 #endif // defined(HOST_OSX) && defined(HOST_ARM64)
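Note: the CodeFragmentHeap hunks above follow the same theme as the loader-heap change. The free-list header used to be written into the freed code block itself, which is why allocations were rounded up to sizeof(FreeBlock) and why RealBackoutMem asserted a minimum size; the header is now a separate node that points at the range, so (possibly execute-only) code pages are never written during bookkeeping. Roughly, with simplified types:

    #include <cstddef>

    // Before: the header lived at the start of the freed region itself,
    // so every free block had to be at least this large.
    struct FreeBlockInPlace
    {
        FreeBlockInPlace* m_pNext;
        std::size_t       m_dwSize;
        // ...the rest of the freed region follows in the same memory
    };

    // After: the header is an ordinary heap node that points at the
    // region, so the region itself is never written and the minimum-size
    // rounding disappears.
    struct FreeBlockNode
    {
        FreeBlockNode* m_pNext;
        void*          m_pBlock;  // the actual free range
        std::size_t    m_dwSize;  // size of that range
    };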
@@ -2232,14 +2256,22 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
 
     // this first allocation is critical as it sets up correctly the loader heap info
-    HeapList *pHp = (HeapList*)pCodeHeap->m_LoaderHeap.AllocMem(sizeof(HeapList));
+    HeapList *pHp = new HeapList;
+
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+    pHp->CLRPersonalityRoutine = (BYTE *)pCodeHeap->m_LoaderHeap.AllocMem(JUMP_ALLOCATE_SIZE);
+#else
+    // Ensure that the heap has a reserved block of memory, so that the GetReservedBytesFree()
+    // and GetAllocPtr() calls below return nonzero values.
+    pCodeHeap->m_LoaderHeap.ReservePages(1);
+#endif
 
     pHp->pHeap = pCodeHeap;
 
     size_t heapSize = pCodeHeap->m_LoaderHeap.GetReservedBytesFree();
     size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize));
 
-    pHp->startAddress = (TADDR)pHp + sizeof(HeapList);
+    pHp->startAddress = (TADDR)pCodeHeap->m_LoaderHeap.GetAllocPtr();
 
     pHp->endAddress      = pHp->startAddress;
     pHp->maxCodeHeapSize = heapSize;
@@ -2259,7 +2291,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
                        ));
 
 #ifdef TARGET_64BIT
-    emitJump((LPBYTE)pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
+    emitJump(pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
 #endif // TARGET_64BIT
 
     pCodeHeap.SuppressRelease();
@@ -2376,13 +2408,7 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi
     }
 #endif
 
-    // VSW 433293
-    // SETUP_NEW_BLOCK reserves the first sizeof(LoaderHeapBlock) bytes for LoaderHeapBlock.
-    // In other word, the first m_pAllocPtr starts at sizeof(LoaderHeapBlock) bytes
-    // after the allocated memory. Therefore, we need to take it into account.
-    size_t requestAndHeadersSize = sizeof(LoaderHeapBlock) + sizeof(HeapList) + initialRequestSize;
-
-    size_t reserveSize = requestAndHeadersSize;
+    size_t reserveSize = initialRequestSize;
 
     if (reserveSize < minReserveSize)
         reserveSize = minReserveSize;
     reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
@@ -2420,7 +2446,7 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi
 
     EX_TRY
     {
-        TADDR pStartRange = (TADDR) pHp;
+        TADDR pStartRange = pHp->GetModuleBase();
         TADDR pEndRange = (TADDR) &((BYTE*)pHp->startAddress)[pHp->maxCodeHeapSize];
 
         ExecutionManager::AddCodeRange(pStartRange,
@@ -2444,8 +2470,8 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi
 
         // If we failed to alloc memory in ExecutionManager::AddCodeRange()
         // then we will delete the LoaderHeap that we allocated
-        // pHp is allocated in pHeap, so only need to delete the LoaderHeap itself
         delete pHp->pHeap;
+        delete pHp;
 
         pHp = NULL;
     }
@@ -2677,7 +2703,7 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t re
     pCodeHdr->SetMethodDesc(pMD);
 #ifdef FEATURE_EH_FUNCLETS
     pCodeHdr->SetNumberOfUnwindInfos(nUnwindInfos);
-    *pModuleBase = (TADDR)pCodeHeap;
+    *pModuleBase = pCodeHeap->GetModuleBase();
 #endif
 
     NibbleMapSet(pCodeHeap, pCode, TRUE);
@@ -3020,7 +3046,7 @@ void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment
     CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
     pCodeHdr->SetStubCodeBlockKind(kind);
 
-    NibbleMapSet(pCodeHeap, (TADDR)mem, TRUE);
+    NibbleMapSet(pCodeHeap, mem, TRUE);
 
     // Record the jump stub reservation
     pCodeHeap->reserveForJumpStubs += requestInfo.getReserveForJumpStubs();
@@ -3517,9 +3543,9 @@ void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
         pHp->SetNext(pHeapList->GetNext());
     }
 
-    DeleteEEFunctionTable((PVOID)pHeapList);
+    DeleteEEFunctionTable((PVOID)pHeapList->GetModuleBase());
 
-    ExecutionManager::DeleteRange((TADDR)pHeapList);
+    ExecutionManager::DeleteRange((TADDR)pHeapList->GetModuleBase());
 
     LOG((LF_JIT, LL_INFO100, "DeleteCodeHeap start" FMT_ADDR "end" FMT_ADDR "\n",
                              (const BYTE*)pHeapList->startAddress,
@@ -3531,6 +3557,7 @@ void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
     // delete pHeapList->pHeap;
     CodeHeap* pHeap = pHeapList->pHeap;
     delete pHeap;
+    delete pHeapList;
 }
 #endif // #ifndef DACCESS_COMPILE
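Note: HeapList is now an ordinary C++ object rather than memory carved from the front of its own code heap, so every teardown path has to free it explicitly, hence the added delete pHp / delete pHeapList above. A stub sketch of the required pairing (stand-in types, not the runtime's):

    struct CodeHeapStub { };           // stands in for the CodeHeap object

    struct HeapListStub
    {
        CodeHeapStub* pHeap = nullptr; // the HeapList used to live inside
    };                                 // this heap and vanish with it

    // Mirrors the order in DeleteCodeHeap: release the heap, then the node.
    static void DeleteCodeHeapStub(HeapListStub* pHeapList)
    {
        delete pHeapList->pHeap;
        delete pHeapList;
    }

    int main()
    {
        HeapListStub* p = new HeapListStub;
        p->pHeap = new CodeHeapStub;
        DeleteCodeHeapStub(p);
        return 0;
    }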
diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h
index 97d8b5f2b7f30..85a857b09d726 100644
--- a/src/coreclr/vm/codeman.h
+++ b/src/coreclr/vm/codeman.h
@@ -478,12 +478,19 @@ typedef struct _HeapList
     size_t             maxCodeHeapSize;    // Size of the entire contiguous block of memory
     size_t             reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block
 
-#if defined(TARGET_AMD64)
-    BYTE        CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE];  // jump thunk to personality routine
-#elif defined(TARGET_ARM64)
-    UINT32      CLRPersonalityRoutine[JUMP_ALLOCATE_SIZE/sizeof(UINT32)]; // jump thunk to personality routine
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+    BYTE*       CLRPersonalityRoutine;  // jump thunk to personality routine
 #endif
 
+    TADDR GetModuleBase()
+    {
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+        return (TADDR)CLRPersonalityRoutine;
+#else
+        return (TADDR)mapBase;
+#endif
+    }
+
     PTR_HeapList GetNext()
     { SUPPORTS_DAC; return hpNext; }
@@ -662,6 +669,7 @@ class CodeFragmentHeap : public ILoaderHeapBackout
     struct FreeBlock
     {
         DPTR(FreeBlock) m_pNext;    // Next block
+        void  *m_pBlock;
         SIZE_T  m_dwSize;           // Size of this block (includes size of FreeBlock)
     };
     typedef DPTR(FreeBlock) PTR_FreeBlock;
@@ -676,7 +684,7 @@ class CodeFragmentHeap : public ILoaderHeapBackout
 
 public:
     CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind);
-    virtual ~CodeFragmentHeap() {}
+    virtual ~CodeFragmentHeap();
 
     TaggedMemAllocPtr RealAllocAlignedMem(size_t dwRequestedSize
                         ,unsigned  dwAlignment
@@ -1241,9 +1249,7 @@ class ExecutionManager
     static ULONG GetCLRPersonalityRoutineValue()
    {
        LIMITED_METHOD_CONTRACT;
-        static_assert_no_msg(offsetof(HeapList, CLRPersonalityRoutine) ==
-            (size_t)((ULONG)offsetof(HeapList, CLRPersonalityRoutine)));
-        return offsetof(HeapList, CLRPersonalityRoutine);
+        return 0;
    }
 #endif // TARGET_64BIT
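Note: GetCLRPersonalityRoutineValue() can return 0 now because the personality-routine thunk is the first allocation in the heap and its own address is what GetModuleBase() reports; previously the thunk was an inline array inside HeapList, so its offset within the struct had to be asserted to fit in a ULONG. A small sketch of the invariant, assuming the 32-bit RVA encoding that 64-bit unwind data uses:

    #include <cassert>
    #include <cstdint>

    using TADDR = std::uintptr_t;

    // Unwind info records the personality routine as a 32-bit RVA from the
    // module base; with the thunk itself as the base, that RVA is always 0.
    static std::uint32_t PersonalityRva(TADDR moduleBase, TADDR thunk)
    {
        return static_cast<std::uint32_t>(thunk - moduleBase);
    }

    int main()
    {
        TADDR thunk = 0x40001000;
        assert(PersonalityRva(/* moduleBase */ thunk, thunk) == 0);
        return 0;
    }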
diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp
index 22785e9d60362..1a5320bb5fb1f 100644
--- a/src/coreclr/vm/dynamicmethod.cpp
+++ b/src/coreclr/vm/dynamicmethod.cpp
@@ -397,8 +397,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
     size_t ReserveBlockSize = pInfo->getRequestSize();
 
     // Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation
-    ReserveBlockSize += sizeof(TrackAllocation) + sizeof(HeapList) + HOST_CODEHEAP_SIZE_ALIGN + 0x100;
-
+    ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100;
 
     // reserve ReserveBlockSize rounded-up to VIRTUAL_ALLOC_RESERVE_GRANULARITY of memory
     ReserveBlockSize = ALIGN_UP(ReserveBlockSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
@@ -428,15 +427,23 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
     m_ApproximateLargestBlock = ReserveBlockSize;
     m_pAllocator = pInfo->m_pAllocator;
 
-    TrackAllocation *pTracker = AllocMemory_NoThrow(0, sizeof(HeapList), sizeof(void*), 0);
+    HeapList* pHp = new HeapList;
+
+    TrackAllocation *pTracker = NULL;
+
+#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+
+    pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0);
     if (pTracker == NULL)
     {
         // This should only ever happen with fault injection
         _ASSERTE(g_pConfig->ShouldInjectFault(INJECTFAULT_DYNAMICCODEHEAP));
+        delete pHp;
         ThrowOutOfMemory();
     }
 
-    HeapList* pHp = (HeapList *)(pTracker + 1);
+    pHp->CLRPersonalityRoutine = (BYTE *)(pTracker + 1);
+#endif
 
     pHp->hpNext = NULL;
     pHp->pHeap = (PTR_CodeHeap)this;
@@ -446,17 +453,17 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
     LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap creation {0x%p} - size available 0x%p, private data ptr [0x%p, 0x%p]\n",
         (HostCodeHeap*)this, m_TotalBytesAvailable, pTracker, pTracker->size));
 
-    // It is imporant to exclude the CLRPersonalityRoutine from the tracked range
-    pHp->startAddress = dac_cast<TADDR>(m_pBaseAddr) + pTracker->size;
+    // It is important to exclude the CLRPersonalityRoutine from the tracked range
+    pHp->startAddress = dac_cast<TADDR>(m_pBaseAddr) + (pTracker ? pTracker->size : 0);
 
     pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress);   // round down to next lower page align
     pHp->pHdrMap = NULL;
 
     pHp->endAddress = pHp->startAddress;
 
-    pHp->maxCodeHeapSize = m_TotalBytesAvailable - pTracker->size;
+    pHp->maxCodeHeapSize = m_TotalBytesAvailable - (pTracker ? pTracker->size : 0);
     pHp->reserveForJumpStubs = 0;
 
 #ifdef HOST_64BIT
-    emitJump((LPBYTE)pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
+    emitJump(pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
 #endif
 
     size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(pHp->maxCodeHeapSize));
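Note: both CreateCodeHeap and InitializeHeapList size the nibble map with HEAP2MAPSIZE(ROUND_UP_TO_PAGE(...)). The map is what NibbleMapSet maintains and what lets the runtime walk from an arbitrary code address back to its method header: conceptually one 4-bit entry per 32-byte bucket of code, packed into DWORDs. A back-of-the-envelope sizing sketch under those assumed constants (the real macros live in the runtime's headers):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::size_t kBytesPerNibble  = 32; // one nibble covers 32 code bytes
    constexpr std::size_t kNibblesPerDword = 8;  // packed into 32-bit words

    constexpr std::size_t NibbleMapBytes(std::size_t heapSize)
    {
        std::size_t nibbles = (heapSize + kBytesPerNibble - 1) / kBytesPerNibble;
        std::size_t dwords  = (nibbles + kNibblesPerDword - 1) / kNibblesPerDword;
        return dwords * sizeof(std::uint32_t);
    }

    int main()
    {
        // A 64KB heap needs a 1KB map under these assumptions.
        std::printf("map bytes: %zu\n", NibbleMapBytes(64 * 1024));
        return 0;
    }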