Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Move metadata off the executable heaps #52912

Merged
merged 4 commits into from
May 20, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions src/coreclr/inc/loaderheap.h
Original file line number Diff line number Diff line change
Expand Up @@ -301,6 +301,12 @@ class UnlockedLoaderHeap
return m_pEndReservedRegion - m_pAllocPtr;
}

    // Returns the heap's current allocation pointer, i.e. the address that
    // the next allocation will be carved from (m_pAllocPtr).
    // "Unlocked" prefix follows the file's convention — presumably the caller
    // is expected to hold the heap lock; TODO confirm against call sites.
    PTR_BYTE UnlockedGetAllocPtr()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pAllocPtr;
    }

private:
// Get some more committed pages - either commit some more in the current reserved region, or, if it
// has run out, reserve another set of pages
Expand Down Expand Up @@ -848,6 +854,18 @@ class ExplicitControlLoaderHeap : public UnlockedLoaderHeap
WRAPPER_NO_CONTRACT;
return UnlockedGetReservedBytesFree();
}

    // Public accessor: forwards to UnlockedGetAllocPtr() to expose the
    // heap's current allocation pointer.
    PTR_BYTE GetAllocPtr()
    {
        WRAPPER_NO_CONTRACT;
        return UnlockedGetAllocPtr();
    }

    // Reserves (and commits the initial part of) a region of at least
    // 'size' bytes by forwarding to UnlockedReservePages.
    // NOTE(review): UnlockedReservePages returns a BOOL failure indicator
    // that is discarded here, so callers cannot observe an OOM failure —
    // verify this is acceptable at every call site.
    void ReservePages(size_t size)
    {
        WRAPPER_NO_CONTRACT;
        UnlockedReservePages(size);
    }
};


Expand Down
57 changes: 28 additions & 29 deletions src/coreclr/utilcode/loaderheap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -982,6 +982,8 @@ UnlockedLoaderHeap::~UnlockedLoaderHeap()
fSuccess = ClrVirtualFree(pVirtualAddress, 0, MEM_RELEASE);
_ASSERTE(fSuccess);
}

delete pSearch;
}

if (m_reservedBlock.m_fReleaseMemory)
Expand Down Expand Up @@ -1047,12 +1049,22 @@ size_t UnlockedLoaderHeap::GetBytesAvailReservedRegion()

#define SETUP_NEW_BLOCK(pData, dwSizeToCommit, dwSizeToReserve) \
m_pPtrToEndOfCommittedRegion = (BYTE *) (pData) + (dwSizeToCommit); \
m_pAllocPtr = (BYTE *) (pData) + sizeof(LoaderHeapBlock); \
m_pAllocPtr = (BYTE *) (pData); \
m_pEndReservedRegion = (BYTE *) (pData) + (dwSizeToReserve);


#ifndef DACCESS_COMPILE

// Deleter used by ReservedMemoryHolder: returns a reserved virtual-memory
// range to the OS via ClrVirtualFree(MEM_RELEASE).  A NULL value is a no-op,
// so the holder can be default-initialized safely.
void ReleaseReservedMemory(BYTE* value)
{
    if (value == NULL)
        return;

    ClrVirtualFree(value, 0, MEM_RELEASE);
}

using ReservedMemoryHolder = SpecializedWrapper<BYTE, ReleaseReservedMemory>;

BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
{
CONTRACTL
Expand All @@ -1065,21 +1077,18 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)

size_t dwSizeToReserve;

// Add sizeof(LoaderHeapBlock)
dwSizeToCommit += sizeof(LoaderHeapBlock);

// Round to page size again
dwSizeToCommit = ALIGN_UP(dwSizeToCommit, GetOsPageSize());

void *pData = NULL;
ReservedMemoryHolder pData = NULL;
BOOL fReleaseMemory = TRUE;

// We were provided with a reserved memory block at instance creation time, so use it if it's big enough.
if (m_reservedBlock.pVirtualAddress != NULL &&
m_reservedBlock.dwVirtualSize >= dwSizeToCommit)
{
// Get the info out of the block.
pData = m_reservedBlock.pVirtualAddress;
pData = (PTR_BYTE)m_reservedBlock.pVirtualAddress;
dwSizeToReserve = m_reservedBlock.dwVirtualSize;
fReleaseMemory = m_reservedBlock.m_fReleaseMemory;

Expand Down Expand Up @@ -1126,16 +1135,17 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
return FALSE;
}

if (!fReleaseMemory)
{
pData.SuppressRelease();
}

// Commit first set of pages, since it will contain the LoaderHeapBlock
void *pTemp = ClrVirtualAlloc(pData, dwSizeToCommit, MEM_COMMIT, (m_Options & LHF_EXECUTABLE) ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);
if (pTemp == NULL)
{
//_ASSERTE(!"Unable to ClrVirtualAlloc commit in a loaderheap");

// Unable to commit - release pages
if (fReleaseMemory)
ClrVirtualFree(pData, 0, MEM_RELEASE);

return FALSE;
}

Expand All @@ -1147,24 +1157,19 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
((const BYTE *) pData) + dwSizeToReserve,
(void *) this))
{

if (fReleaseMemory)
ClrVirtualFree(pData, 0, MEM_RELEASE);

return FALSE;
}
}

m_dwTotalAlloc += dwSizeToCommit;

LoaderHeapBlock *pNewBlock;
LoaderHeapBlock *pNewBlock = new (nothrow) LoaderHeapBlock;
if (pNewBlock == NULL)
{
return FALSE;
}

#if defined(HOST_OSX) && defined(HOST_ARM64)
// Always assume we are touching executable heap
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
m_dwTotalAlloc += dwSizeToCommit;

pNewBlock = (LoaderHeapBlock *) pData;
pData.SuppressRelease();

pNewBlock->dwVirtualSize = dwSizeToReserve;
pNewBlock->pVirtualAddress = pData;
Expand All @@ -1179,7 +1184,7 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
pCurBlock = pCurBlock->pNext;

if (pCurBlock != NULL)
m_pCurBlock->pNext = pNewBlock;
pCurBlock->pNext = pNewBlock;
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This fixes a bug that I've discovered while modifying the surrounding code.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This linked list looks overengineered. I do not see a good reason why this maintains both m_pCurBlock and m_pFirstBlock. I think a single pointer that points to the head would be enough.

else
m_pFirstBlock = pNewBlock;

Expand Down Expand Up @@ -1827,16 +1832,11 @@ void UnlockedLoaderHeap::DumpFreeList()
size_t dwsize = pBlock->m_dwSize;
BOOL ccbad = FALSE;
BOOL sizeunaligned = FALSE;
BOOL sizesmall = FALSE;

if ( 0 != (dwsize & ALLOC_ALIGN_CONSTANT) )
{
sizeunaligned = TRUE;
}
if ( dwsize < sizeof(LoaderHeapBlock))
{
sizesmall = TRUE;
}

for (size_t i = sizeof(LoaderHeapFreeBlock); i < dwsize; i++)
{
Expand All @@ -1850,7 +1850,6 @@ void UnlockedLoaderHeap::DumpFreeList()
printf("Addr = %pxh, Size = %lxh", pBlock, ((ULONG)dwsize));
if (ccbad) printf(" *** ERROR: NOT CC'd ***");
if (sizeunaligned) printf(" *** ERROR: size not a multiple of ALLOC_ALIGN_CONSTANT ***");
if (sizesmall) printf(" *** ERROR: size smaller than sizeof(LoaderHeapFreeBlock) ***");
printf("\n");

pBlock = pBlock->m_pNext;
Expand Down
72 changes: 44 additions & 28 deletions src/coreclr/vm/codeman.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1839,18 +1839,36 @@ CodeFragmentHeap::CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKi
// Records the range [pMem, pMem + dwSize) on this heap's free-block list so
// later allocations can reuse it.  The bookkeeping node is malloc'ed
// separately rather than placed inside the tracked memory itself.
void CodeFragmentHeap::AddBlock(VOID * pMem, size_t dwSize)
{
    LIMITED_METHOD_CONTRACT;

    FreeBlock * pNode = (FreeBlock *)malloc(sizeof(FreeBlock));

    // In the OOM case we simply don't track the block: this is a
    // FORBID_FAULT code path, so the failure cannot be propagated.
    if (pNode == NULL)
        return;

    pNode->m_pNext  = m_pFreeBlocks;
    pNode->m_pBlock = pMem;
    pNode->m_dwSize = dwSize;
    m_pFreeBlocks   = pNode;
}

// Unlinks the free-list node addressed by ppBlock and destroys it.  On
// return, *ppBlock refers to the next node in the chain (NULL at the tail).
void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock)
{
    LIMITED_METHOD_CONTRACT;

    FreeBlock * pVictim = *ppBlock;
    *ppBlock = pVictim->m_pNext;

    // Scrub the node before handing it back to the process heap.
    ZeroMemory(pVictim, sizeof(FreeBlock));
    free(pVictim);
}

// Frees every bookkeeping node remaining on the free-block list.  Only the
// nodes themselves are released here — the memory ranges they describe are
// not freed by this destructor.
CodeFragmentHeap::~CodeFragmentHeap()
{
    for (FreeBlock* pCur = m_pFreeBlocks; pCur != NULL; )
    {
        FreeBlock* pNext = pCur->m_pNext;
        free(pCur);
        pCur = pNext;
    }
}

TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
Expand All @@ -1869,9 +1887,6 @@ TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize

dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR));

if (dwRequestedSize < sizeof(FreeBlock))
dwRequestedSize = sizeof(FreeBlock);

// We will try to batch up allocation of small blocks into one large allocation
#define SMALL_BLOCK_THRESHOLD 0x100
SIZE_T nFreeSmallBlocks = 0;
Expand All @@ -1881,7 +1896,7 @@ TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
while (*ppFreeBlock != NULL)
{
FreeBlock * pFreeBlock = *ppFreeBlock;
if (((BYTE *)pFreeBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
if (((BYTE *)pFreeBlock->m_pBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock->m_pBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
{
if (ppBestFit == NULL || pFreeBlock->m_dwSize < (*ppBestFit)->m_dwSize)
ppBestFit = ppFreeBlock;
Expand All @@ -1898,7 +1913,7 @@ TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t dwRequestedSize
SIZE_T dwSize;
if (ppBestFit != NULL)
{
pMem = *ppBestFit;
pMem = (*ppBestFit)->m_pBlock;
dwSize = (*ppBestFit)->m_dwSize;

RemoveBlock(ppBestFit);
Expand Down Expand Up @@ -1946,8 +1961,6 @@ void CodeFragmentHeap::RealBackoutMem(void *pMem
{
CrstHolder ch(&m_CritSec);

_ASSERTE(dwSize >= sizeof(FreeBlock));

#if defined(HOST_OSX) && defined(HOST_ARM64)
auto jitWriteEnableHolder = PAL_JITWriteEnable(true);
#endif // defined(HOST_OSX) && defined(HOST_ARM64)
Expand Down Expand Up @@ -2232,14 +2245,22 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap


// this first allocation is critical as it sets up correctly the loader heap info
HeapList *pHp = (HeapList*)pCodeHeap->m_LoaderHeap.AllocMem(sizeof(HeapList));
HeapList *pHp = new HeapList;

#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
pHp->CLRPersonalityRoutine = (BYTE *)pCodeHeap->m_LoaderHeap.AllocMem(JUMP_ALLOCATE_SIZE);
#else
// Ensure that the heap has a reserved block of memory and so the GetReservedBytesFree()
// and GetAllocPtr() calls below return nonzero values.
pCodeHeap->m_LoaderHeap.ReservePages(1);
#endif

pHp->pHeap = pCodeHeap;

size_t heapSize = pCodeHeap->m_LoaderHeap.GetReservedBytesFree();
size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize));

pHp->startAddress = (TADDR)pHp + sizeof(HeapList);
pHp->startAddress = (TADDR)pCodeHeap->m_LoaderHeap.GetAllocPtr();

pHp->endAddress = pHp->startAddress;
pHp->maxCodeHeapSize = heapSize;
Expand All @@ -2259,7 +2280,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
));

#ifdef TARGET_64BIT
emitJump((LPBYTE)pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
emitJump(pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
#endif // TARGET_64BIT

pCodeHeap.SuppressRelease();
Expand Down Expand Up @@ -2376,13 +2397,7 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi
}
#endif

// <BUGNUM> VSW 433293 </BUGNUM>
// SETUP_NEW_BLOCK reserves the first sizeof(LoaderHeapBlock) bytes for LoaderHeapBlock.
// In other word, the first m_pAllocPtr starts at sizeof(LoaderHeapBlock) bytes
// after the allocated memory. Therefore, we need to take it into account.
size_t requestAndHeadersSize = sizeof(LoaderHeapBlock) + sizeof(HeapList) + initialRequestSize;

size_t reserveSize = requestAndHeadersSize;
size_t reserveSize = initialRequestSize;
if (reserveSize < minReserveSize)
reserveSize = minReserveSize;
reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
Expand Down Expand Up @@ -2420,7 +2435,7 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi

EX_TRY
{
TADDR pStartRange = (TADDR) pHp;
TADDR pStartRange = pHp->GetModuleBase();
TADDR pEndRange = (TADDR) &((BYTE*)pHp->startAddress)[pHp->maxCodeHeapSize];

ExecutionManager::AddCodeRange(pStartRange,
Expand All @@ -2444,8 +2459,8 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi
// If we failed to alloc memory in ExecutionManager::AddCodeRange()
// then we will delete the LoaderHeap that we allocated

// pHp is allocated in pHeap, so only need to delete the LoaderHeap itself
delete pHp->pHeap;
delete pHp;

pHp = NULL;
}
Expand Down Expand Up @@ -2677,7 +2692,7 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, size_t re
pCodeHdr->SetMethodDesc(pMD);
#ifdef FEATURE_EH_FUNCLETS
pCodeHdr->SetNumberOfUnwindInfos(nUnwindInfos);
*pModuleBase = (TADDR)pCodeHeap;
*pModuleBase = pCodeHeap->GetModuleBase();
#endif

NibbleMapSet(pCodeHeap, pCode, TRUE);
Expand Down Expand Up @@ -3020,7 +3035,7 @@ void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment
CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
pCodeHdr->SetStubCodeBlockKind(kind);

NibbleMapSet(pCodeHeap, (TADDR)mem, TRUE);
NibbleMapSet(pCodeHeap, mem, TRUE);

// Record the jump stub reservation
pCodeHeap->reserveForJumpStubs += requestInfo.getReserveForJumpStubs();
Expand Down Expand Up @@ -3517,9 +3532,9 @@ void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
pHp->SetNext(pHeapList->GetNext());
}

DeleteEEFunctionTable((PVOID)pHeapList);
DeleteEEFunctionTable((PVOID)pHeapList->mapBase);

ExecutionManager::DeleteRange((TADDR)pHeapList);
ExecutionManager::DeleteRange((TADDR)pHeapList->mapBase);

LOG((LF_JIT, LL_INFO100, "DeleteCodeHeap start" FMT_ADDR "end" FMT_ADDR "\n",
(const BYTE*)pHeapList->startAddress,
Expand All @@ -3531,6 +3546,7 @@ void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
// delete pHeapList->pHeap;
CodeHeap* pHeap = pHeapList->pHeap;
delete pHeap;
delete pHeapList;
}

#endif // #ifndef DACCESS_COMPILE
Expand Down
Loading