3 changes: 0 additions & 3 deletions src/coreclr/clrdefinitions.cmake
@@ -183,9 +183,6 @@ if (NOT CLR_CMAKE_HOST_ANDROID)
endif(NOT CLR_CMAKE_HOST_ANDROID)
add_definitions(-DFEATURE_SYMDIFF)
add_compile_definitions(FEATURE_TIERED_COMPILATION)
-if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64)
-add_compile_definitions(FEATURE_ON_STACK_REPLACEMENT)
-endif (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64)
add_compile_definitions(FEATURE_PGO)
if (CLR_CMAKE_TARGET_ARCH_AMD64)
# Enable the AMD64 Unix struct passing JIT-EE interface for all AMD64 platforms, to enable altjit.
4 changes: 2 additions & 2 deletions src/coreclr/debug/daccess/fntableaccess.h
@@ -30,13 +30,13 @@ struct FakeHeapList
size_t maxCodeHeapSize;
size_t reserveForJumpStubs;
DWORD_PTR pLoaderAllocator;
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
DWORD_PTR CLRPersonalityRoutine;
#endif

DWORD_PTR GetModuleBase()
{
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
return CLRPersonalityRoutine;
#else
return mapBase;
6 changes: 3 additions & 3 deletions src/coreclr/inc/clrconfigvalues.h
@@ -487,11 +487,11 @@ RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_ProcessorCount, W("PROCESSOR_COUNT"), 0, "S
#endif // _DEBUG
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TieredCompilation, W("TieredCompilation"), 1, "Enables tiered compilation")
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_QuickJit, W("TC_QuickJit"), 1, "For methods that would be jitted, enable using quick JIT when appropriate.")
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 1, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
-#else // !(defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)) || defined(TARGET_RISCV64)
+#else // FEATURE_ON_STACK_REPLACEMENT
RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 0, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
-#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#endif // FEATURE_ON_STACK_REPLACEMENT
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_AggressiveTiering, W("TC_AggressiveTiering"), 0, "Transition through tiers aggressively.")
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountThreshold, W("TC_CallCountThreshold"), TC_CallCountThreshold, "Number of times a method must be called in tier 0 after which it is promoted to the next tier.")
RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountingDelayMs, W("TC_CallCountingDelayMs"), TC_CallCountingDelayMs, "A perpetual delay in milliseconds that is applied to call counting in tier 0 and jitting at higher tiers, while there is startup-like activity.")
58 changes: 2 additions & 56 deletions src/coreclr/inc/gcinfotypes.h
@@ -113,6 +113,8 @@ struct GcStackSlot
}
};

+// ReturnKind is not encoded in GCInfo v4 and later, except on x86.
+
//--------------------------------------------------------------------------------
// ReturnKind -- encoding return type information in GcInfo
//
@@ -137,61 +139,6 @@
//
//--------------------------------------------------------------------------------

-// RT_Unset: An intermediate step for staged bringup.
-// When ReturnKind is RT_Unset, it means that the JIT did not set
-// the ReturnKind in the GCInfo, and therefore the VM cannot rely on it,
-// and must use other mechanisms (similar to GcInfo ver 1) to determine
-// the Return type's GC information.
-//
-// RT_Unset is only used in the following situations:
-// X64: Used by JIT64 until updated to use GcInfo v2 API
-// ARM: Used by JIT32 until updated to use GcInfo v2 API
-//
-// RT_Unset should have a valid encoding, whose bits are actually stored in the image.
-// For X86, there are no free bits, and there's no RT_Unused enumeration.
-
-#if defined(TARGET_X86)
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Float
-
-#elif defined(TARGET_ARM)
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Unset
-
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
-
-// Slim Header:
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Unset
-
-// Fat Header:
-
-// 0000 RT_Scalar
-// 0001 RT_Object
-// 0010 RT_ByRef
-// 0011 RT_Unset
-// 0100 RT_Scalar_Obj
-// 1000 RT_Scalar_ByRef
-// 0101 RT_Obj_Obj
-// 1001 RT_Obj_ByRef
-// 0110 RT_ByRef_Obj
-// 1010 RT_ByRef_ByRef
-
-#else
-#ifdef PORTABILITY_WARNING
-PORTABILITY_WARNING("Need ReturnKind for new Platform")
-#endif // PORTABILITY_WARNING
-#endif // Target checks
-
enum ReturnKind {

// Cases for Return in one register
@@ -1026,4 +973,3 @@ struct InterpreterGcInfoEncoding {
#endif // debug_instrumented_return

#endif // !__GCINFOTYPES_H__

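Illustrative aside: the comments removed above documented the old per-target ReturnKind bit layouts, while the new note says ReturnKind is no longer encoded in GCInfo v4 and later except on x86. As a rough illustration of the 2-bit slim-header scheme those comments described — a sketch with stand-in names, not the runtime's actual GcInfo encoder or decoder — packing and unpacking could look like this:

```cpp
// Illustrative sketch only -- stand-in names, not the real GcInfo encoder/decoder.
// It mirrors the 2-bit slim-header table from the removed comments:
// 00 RT_Scalar, 01 RT_Object, 10 RT_ByRef, 11 RT_Unset.
#include <cassert>
#include <cstdint>

enum SketchReturnKind : uint32_t
{
    SRK_Scalar = 0,
    SRK_Object = 1,
    SRK_ByRef  = 2,
    SRK_Unset  = 3,
};

// Store the return kind in the low two bits of a (hypothetical) header word.
inline uint32_t PackReturnKind(uint32_t header, SketchReturnKind kind)
{
    return (header & ~0x3u) | (static_cast<uint32_t>(kind) & 0x3u);
}

// Recover the return kind from the low two bits.
inline SketchReturnKind UnpackReturnKind(uint32_t header)
{
    return static_cast<SketchReturnKind>(header & 0x3u);
}

int main()
{
    uint32_t header = PackReturnKind(0, SRK_ByRef);
    assert(UnpackReturnKind(header) == SRK_ByRef);
    return 0;
}
```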
6 changes: 4 additions & 2 deletions src/coreclr/inc/switches.h
@@ -46,10 +46,12 @@
#if defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_BROWSER)
#define USE_LAZY_PREFERRED_RANGE 0

-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_64BIT)

+#define FEATURE_ON_STACK_REPLACEMENT
+
#if defined(HOST_UNIX)
-// In PAL we have a smechanism that reserves memory on start up that is
+// In PAL we have a mechanism that reserves memory on start up that is
// close to libcoreclr and intercepts calls to VirtualAlloc to serve back
// from this area.
#define USE_LAZY_PREFERRED_RANGE 0
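Illustrative aside: with FEATURE_ON_STACK_REPLACEMENT now defined centrally in switches.h for all 64-bit targets, call sites can test the single feature macro instead of repeating the architecture list. A minimal sketch of that pattern, with a hypothetical helper name:

```cpp
// Hypothetical helper -- shows the feature-macro test replacing the old
// TARGET_AMD64/ARM64/LOONGARCH64/RISCV64 architecture list.
#ifdef FEATURE_ON_STACK_REPLACEMENT
inline bool OsrEnabledAtBuildTime() { return true; }
#else
inline bool OsrEnabledAtBuildTime() { return false; }
#endif
```

The same substitution is what the later hunks in clrconfigvalues.h, jitconfigvalues.h, and lclvars.cpp apply.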
2 changes: 1 addition & 1 deletion src/coreclr/jit/codegencommon.cpp
@@ -2107,7 +2107,7 @@ void CodeGen::genEmitMachineCode()

bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ?

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef TARGET_64BIT
trackedStackPtrsContig = false;
#elif defined(TARGET_ARM)
// On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous
4 changes: 2 additions & 2 deletions src/coreclr/jit/importercalls.cpp
@@ -9121,7 +9121,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni
return true;
}

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef TARGET_64BIT
// Jit64 compat:
if (callerRetType == TYP_VOID)
{
@@ -9151,7 +9151,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni
{
return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
}
-#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64
+#endif // TARGET_64BIT

return false;
}
4 changes: 2 additions & 2 deletions src/coreclr/jit/jitconfigvalues.h
@@ -704,11 +704,11 @@ CONFIG_STRING(JitGuardedDevirtualizationRange, "JitGuardedDevirtualizationRange"
CONFIG_INTEGER(JitRandomGuardedDevirtualization, "JitRandomGuardedDevirtualization", 0)

// Enable insertion of patchpoints into Tier0 methods, switching to optimized where needed.
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 1)
#else
RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 0)
-#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#endif // FEATURE_ON_STACK_REPLACEMENT

// Initial patchpoint counter value used by jitted code
RELEASE_CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, "TC_OnStackReplacement_InitialCounter", 1000)
6 changes: 2 additions & 4 deletions src/coreclr/jit/lclvars.cpp
@@ -6739,7 +6739,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo
offset += codeGen->genCallerSPtoInitialSPdelta();
}

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
if (forRootFrame && opts.IsOSR())
{
const PatchpointInfo* const ppInfo = info.compPatchpointInfo;
@@ -6757,9 +6757,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo
// is simply TotalFrameSize plus one register.
//
const int adjustment = ppInfo->TotalFrameSize() + REGSIZE_BYTES;

-#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
-
+#else
const int adjustment = ppInfo->TotalFrameSize();
#endif

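Illustrative aside: the comment in this hunk notes that, for an OSR root frame, the offset adjustment is the Tier-0 frame size plus one register slot on AMD64 (for the pushed return address), and just the Tier-0 frame size on the other 64-bit targets. A small arithmetic sketch with hypothetical values:

```cpp
// Hypothetical values -- only the REGSIZE_BYTES difference between the two
// branches of the #if in the hunk above is being illustrated.
#include <cstdio>

int main()
{
    const int REGSIZE_BYTES  = 8;    // pointer size on a 64-bit target
    const int tier0FrameSize = 0x90; // stand-in for PatchpointInfo::TotalFrameSize()

    const int amd64Adjustment = tier0FrameSize + REGSIZE_BYTES; // AMD64: frame + return address
    const int otherAdjustment = tier0FrameSize;                 // ARM64 / LoongArch64 / RISC-V

    printf("AMD64 adjustment: 0x%x bytes\n", amd64Adjustment);
    printf("other 64-bit adjustment: 0x%x bytes\n", otherAdjustment);
    return 0;
}
```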
@@ -1143,15 +1143,6 @@ int UnixNativeCodeManager::TrailingEpilogueInstructionsCount(MethodInfo * pMetho
return 0;
}

-// Convert the return kind that was encoded by RyuJIT to the
-// enum used by the runtime.
-GCRefKind GetGcRefKind(ReturnKind returnKind)
-{
-ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef));
-
-return (GCRefKind)returnKind;
-}
-
bool UnixNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in
PTR_PTR_VOID * ppvRetAddrLocation) // out
16 changes: 2 additions & 14 deletions src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp
@@ -821,19 +821,6 @@ bool CoffNativeCodeManager::IsUnwindable(PTR_VOID pvAddress)
return true;
}

-// Convert the return kind that was encoded by RyuJIT to the
-// enum used by the runtime.
-GCRefKind GetGcRefKind(ReturnKind returnKind)
-{
-#ifdef TARGET_ARM64
-ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef));
-#else
-ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef));
-#endif
-
-return (GCRefKind)returnKind;
-}
-
bool CoffNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in
PTR_PTR_VOID * ppvRetAddrLocation) // out
@@ -983,7 +970,8 @@ GCRefKind CoffNativeCodeManager::GetReturnValueKind(MethodInfo * pMethodInfo,
hdrInfo infoBuf;
size_t infoSize = DecodeGCHdrInfo(GCInfoToken(gcInfo), codeOffset, &infoBuf);

-return GetGcRefKind(infoBuf.returnKind);
+ASSERT(infoBuf.returnKind != RT_Float); // See TODO above
+return (GCRefKind)infoBuf.returnKind;
}
#endif

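Illustrative aside: the hunk above drops the GetGcRefKind helper and casts ReturnKind straight to GCRefKind, which only works while the two enums agree numerically over the range being cast. One way such an assumption can be pinned down at compile time, shown with stand-in enums rather than the runtime's real definitions:

```cpp
#include <cstdint>

// Stand-in enums; the real ReturnKind and GCRefKind live in the runtime headers.
enum IllustrativeReturnKind : uint32_t { IRK_Scalar = 0, IRK_Object = 1, IRK_ByRef = 2 };
enum IllustrativeGCRefKind  : uint32_t { IGK_Scalar = 0, IGK_Object = 1, IGK_ByRef = 2 };

// The direct cast is only valid while the enumerators line up, so assert it.
static_assert(IRK_Scalar == static_cast<uint32_t>(IGK_Scalar), "kind layouts must match");
static_assert(IRK_Object == static_cast<uint32_t>(IGK_Object), "kind layouts must match");
static_assert(IRK_ByRef  == static_cast<uint32_t>(IGK_ByRef),  "kind layouts must match");

inline IllustrativeGCRefKind ToGCRefKind(IllustrativeReturnKind rk)
{
    return static_cast<IllustrativeGCRefKind>(static_cast<uint32_t>(rk));
}
```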
2 changes: 1 addition & 1 deletion src/coreclr/pal/src/exception/remote-unwind.cpp
@@ -114,7 +114,7 @@ typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size
#define PRId PRId32
#define PRIA "08"
#define PRIxA PRIA PRIx
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_64BIT)
#define PRIx PRIx64
#define PRIu PRIu64
#define PRId PRId64
10 changes: 5 additions & 5 deletions src/coreclr/vm/codeman.cpp
@@ -2373,7 +2373,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize)
{
LIMITED_METHOD_CONTRACT;

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef TARGET_64BIT
//
// Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce
// chance that we won't be able allocate jump stub because of lack of suitable address space.
@@ -2425,7 +2425,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
bool fAllocatedFromEmergencyJumpStubReserve = false;

size_t allocationSize = pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(initialRequestSize);
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
if (!pInfo->IsInterpreted())
{
allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE);
@@ -2485,7 +2485,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
// this first allocation is critical as it sets up correctly the loader heap info
HeapList *pHp = new HeapList;

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
if (pInfo->IsInterpreted())
{
pHp->CLRPersonalityRoutine = NULL;
@@ -2526,7 +2526,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to next lower page align
size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize));
pHp->pHdrMap = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize));
-#ifdef TARGET_64BIT
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
if (pHp->CLRPersonalityRoutine != NULL)
{
ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12);
@@ -2655,7 +2655,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe

size_t reserveSize = initialRequestSize;

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
if (!pInfo->IsInterpreted())
{
reserveSize += JUMP_ALLOCATE_SIZE;
6 changes: 3 additions & 3 deletions src/coreclr/vm/codeman.h
@@ -534,13 +534,13 @@ struct HeapList
size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block

PTR_LoaderAllocator pLoaderAllocator; // LoaderAllocator of HeapList
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
BYTE* CLRPersonalityRoutine; // jump thunk to personality routine, NULL if there is no personality routine (e.g. interpreter code heap)
#endif

TADDR GetModuleBase()
{
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
return (CLRPersonalityRoutine != NULL) ? (TADDR)CLRPersonalityRoutine : (TADDR)mapBase;
#else
return (TADDR)mapBase;
@@ -2281,7 +2281,7 @@ class ExecutionManager
BOOL Acquired();
};

-#ifdef TARGET_64BIT
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
static ULONG GetCLRPersonalityRoutineValue()
{
LIMITED_METHOD_CONTRACT;
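Illustrative aside: on 64-bit Windows the code heap appears to keep a small jump thunk to the CLR personality routine at the start of the block, and GetModuleBase() reports that address, presumably so the Win64 unwind data can refer to both the personality routine and the jitted code with 32-bit RVAs from a single base. The arithmetic, roughly, with made-up addresses:

```cpp
// Made-up addresses -- only the "everything must be within a 32-bit RVA of the
// reported module base" constraint is being illustrated.
#include <cassert>
#include <cstdint>

int main()
{
    const uintptr_t heapBlockStart   = 0x00007FF812340000;     // hypothetical code heap block
    const uintptr_t personalityThunk = heapBlockStart;         // thunk placed at the start of the block
    const uintptr_t jittedMethod     = heapBlockStart + 0x2000; // hypothetical method in the block

    const uintptr_t moduleBase = personalityThunk;             // what GetModuleBase() would report here

    // RVAs used by the unwind data must fit in 32 bits.
    assert(personalityThunk - moduleBase <= UINT32_MAX);
    assert(jittedMethod - moduleBase <= UINT32_MAX);
    return 0;
}
```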
15 changes: 4 additions & 11 deletions src/coreclr/vm/dynamicmethod.cpp
@@ -1,9 +1,5 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-//
-
-//
-

#include "common.h"
#include "dynamicmethod.h"
@@ -18,7 +14,6 @@
#include "CachedInterfaceDispatchPal.h"
#include "CachedInterfaceDispatch.h"


#ifndef DACCESS_COMPILE

// get the method table for dynamic methods
@@ -404,7 +399,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
// Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation
ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100;

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
ReserveBlockSize += JUMP_ALLOCATE_SIZE;
#endif

@@ -440,6 +435,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)

TrackAllocation *pTracker = NULL;

+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
#ifdef FEATURE_INTERPRETER
if (pInfo->IsInterpreted())
{
@@ -448,8 +444,6 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
else
#endif // FEATURE_INTERPRETER
{
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
-
pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0);
if (pTracker == NULL)
{
@@ -460,9 +454,8 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
}

pHp->CLRPersonalityRoutine = (BYTE *)(pTracker + 1);

-#endif
}
+#endif

pHp->hpNext = NULL;
pHp->pHeap = (PTR_CodeHeap)this;
@@ -481,7 +474,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
pHp->maxCodeHeapSize = m_TotalBytesAvailable - (pTracker ? pTracker->size : 0);
pHp->reserveForJumpStubs = 0;

-#ifdef HOST_64BIT
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
if (pHp->CLRPersonalityRoutine != NULL)
{
ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12);