From ef4cf96ebd171bbcd4a4ebb7181796717376b243 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Sat, 26 Apr 2025 22:04:57 +0000 Subject: [PATCH 01/22] Simplify 64-bit platform conditions --- src/coreclr/inc/clrconfigvalues.h | 6 +- src/coreclr/inc/gcinfotypes.h | 58 +------------------ src/coreclr/inc/switches.h | 2 +- src/coreclr/jit/codegencommon.cpp | 2 +- src/coreclr/jit/importercalls.cpp | 4 +- src/coreclr/jit/jitconfigvalues.h | 4 +- src/coreclr/jit/lclvars.cpp | 2 +- .../Runtime/unix/UnixNativeCodeManager.cpp | 9 --- .../Runtime/windows/CoffNativeCodeManager.cpp | 15 +---- .../pal/src/exception/remote-unwind.cpp | 2 +- src/coreclr/vm/codeman.cpp | 8 +-- src/coreclr/vm/codeman.h | 4 +- src/coreclr/vm/dynamicmethod.cpp | 2 +- 13 files changed, 21 insertions(+), 97 deletions(-) diff --git a/src/coreclr/inc/clrconfigvalues.h b/src/coreclr/inc/clrconfigvalues.h index 8aee6fea67127e..378f4c8b0948be 100644 --- a/src/coreclr/inc/clrconfigvalues.h +++ b/src/coreclr/inc/clrconfigvalues.h @@ -487,11 +487,11 @@ RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_ProcessorCount, W("PROCESSOR_COUNT"), 0, "S #endif // _DEBUG RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TieredCompilation, W("TieredCompilation"), 1, "Enables tiered compilation") RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_QuickJit, W("TC_QuickJit"), 1, "For methods that would be jitted, enable using quick JIT when appropriate.") -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#ifdef FEATURE_ON_STACK_REPLACEMENT RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 1, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.") -#else // !(defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)) || defined(TARGET_RISCV64) +#else // FEATURE_ON_STACK_REPLACEMENT RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 0, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.") -#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#endif // FEATURE_ON_STACK_REPLACEMENT RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_AggressiveTiering, W("TC_AggressiveTiering"), 0, "Transition through tiers aggressively.") RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountThreshold, W("TC_CallCountThreshold"), TC_CallCountThreshold, "Number of times a method must be called in tier 0 after which it is promoted to the next tier.") RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountingDelayMs, W("TC_CallCountingDelayMs"), TC_CallCountingDelayMs, "A perpetual delay in milliseconds that is applied to call counting in tier 0 and jitting at higher tiers, while there is startup-like activity.") diff --git a/src/coreclr/inc/gcinfotypes.h b/src/coreclr/inc/gcinfotypes.h index d83fe0a0086666..0218a49853a1cd 100644 --- a/src/coreclr/inc/gcinfotypes.h +++ b/src/coreclr/inc/gcinfotypes.h @@ -113,6 +113,8 @@ struct GcStackSlot } }; +// ReturnKind is not encoded in GCInfo v4 and later, except on x86. + //-------------------------------------------------------------------------------- // ReturnKind -- encoding return type information in GcInfo // @@ -137,61 +139,6 @@ struct GcStackSlot // //-------------------------------------------------------------------------------- -// RT_Unset: An intermediate step for staged bringup. 
-// When ReturnKind is RT_Unset, it means that the JIT did not set -// the ReturnKind in the GCInfo, and therefore the VM cannot rely on it, -// and must use other mechanisms (similar to GcInfo ver 1) to determine -// the Return type's GC information. -// -// RT_Unset is only used in the following situations: -// X64: Used by JIT64 until updated to use GcInfo v2 API -// ARM: Used by JIT32 until updated to use GcInfo v2 API -// -// RT_Unset should have a valid encoding, whose bits are actually stored in the image. -// For X86, there are no free bits, and there's no RT_Unused enumeration. - -#if defined(TARGET_X86) - -// 00 RT_Scalar -// 01 RT_Object -// 10 RT_ByRef -// 11 RT_Float - -#elif defined(TARGET_ARM) - -// 00 RT_Scalar -// 01 RT_Object -// 10 RT_ByRef -// 11 RT_Unset - -#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) - -// Slim Header: - -// 00 RT_Scalar -// 01 RT_Object -// 10 RT_ByRef -// 11 RT_Unset - -// Fat Header: - -// 0000 RT_Scalar -// 0001 RT_Object -// 0010 RT_ByRef -// 0011 RT_Unset -// 0100 RT_Scalar_Obj -// 1000 RT_Scalar_ByRef -// 0101 RT_Obj_Obj -// 1001 RT_Obj_ByRef -// 0110 RT_ByRef_Obj -// 1010 RT_ByRef_ByRef - -#else -#ifdef PORTABILITY_WARNING -PORTABILITY_WARNING("Need ReturnKind for new Platform") -#endif // PORTABILITY_WARNING -#endif // Target checks - enum ReturnKind { // Cases for Return in one register @@ -1026,4 +973,3 @@ struct InterpreterGcInfoEncoding { #endif // debug_instrumented_return #endif // !__GCINFOTYPES_H__ - diff --git a/src/coreclr/inc/switches.h b/src/coreclr/inc/switches.h index 01d626407f47ef..2b839d45884721 100644 --- a/src/coreclr/inc/switches.h +++ b/src/coreclr/inc/switches.h @@ -46,7 +46,7 @@ #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_BROWSER) #define USE_LAZY_PREFERRED_RANGE 0 -#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64) +#elif defined(TARGET_64BIT) #if defined(HOST_UNIX) // In PAL we have a smechanism that reserves memory on start up that is diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 6cda78bbaed410..d77aae43bc683e 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2107,7 +2107,7 @@ void CodeGen::genEmitMachineCode() bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ? 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) trackedStackPtrsContig = false; #elif defined(TARGET_ARM) // On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index ae3989f2f2d22c..c7f0f90ec10400 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -9121,7 +9121,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni return true; } -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) // Jit64 compat: if (callerRetType == TYP_VOID) { @@ -9151,7 +9151,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni { return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize); } -#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64 +#endif // TARGET_64BIT return false; } diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index 2c9231cb47eb24..e51d75b17a08b8 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -704,11 +704,11 @@ CONFIG_STRING(JitGuardedDevirtualizationRange, "JitGuardedDevirtualizationRange" CONFIG_INTEGER(JitRandomGuardedDevirtualization, "JitRandomGuardedDevirtualization", 0) // Enable insertion of patchpoints into Tier0 methods, switching to optimized where needed. -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#ifdef FEATURE_ON_STACK_REPLACEMENT RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 1) #else RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 0) -#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#endif // defined(FEATURE_ON_STACK_REPLACEMENT) // Initial patchpoint counter value used by jitted code RELEASE_CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, "TC_OnStackReplacement_InitialCounter", 1000) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 889147dc17c816..91195a6f57f10c 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -6739,7 +6739,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo offset += codeGen->genCallerSPtoInitialSPdelta(); } -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#ifdef FEATURE_ON_STACK_REPLACEMENT if (forRootFrame && opts.IsOSR()) { const PatchpointInfo* const ppInfo = info.compPatchpointInfo; diff --git a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp index 96d7eb5f22c51e..9ac748ecddd5ea 100644 --- a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp +++ b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp @@ -1143,15 +1143,6 @@ int UnixNativeCodeManager::TrailingEpilogueInstructionsCount(MethodInfo * pMetho return 0; } -// Convert the return kind that was encoded by RyuJIT to the -// enum used by the runtime. 
-GCRefKind GetGcRefKind(ReturnKind returnKind) -{ - ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef)); - - return (GCRefKind)returnKind; -} - bool UnixNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo * pMethodInfo, REGDISPLAY * pRegisterSet, // in PTR_PTR_VOID * ppvRetAddrLocation) // out diff --git a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp index 1b95e27caad563..8a327a7602ba34 100644 --- a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp +++ b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp @@ -821,19 +821,6 @@ bool CoffNativeCodeManager::IsUnwindable(PTR_VOID pvAddress) return true; } -// Convert the return kind that was encoded by RyuJIT to the -// enum used by the runtime. -GCRefKind GetGcRefKind(ReturnKind returnKind) -{ -#ifdef TARGET_ARM64 - ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef)); -#else - ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef)); -#endif - - return (GCRefKind)returnKind; -} - bool CoffNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo * pMethodInfo, REGDISPLAY * pRegisterSet, // in PTR_PTR_VOID * ppvRetAddrLocation) // out @@ -983,7 +970,7 @@ GCRefKind CoffNativeCodeManager::GetReturnValueKind(MethodInfo * pMethodInfo, hdrInfo infoBuf; size_t infoSize = DecodeGCHdrInfo(GCInfoToken(gcInfo), codeOffset, &infoBuf); - return GetGcRefKind(infoBuf.returnKind); + return (GCRefKind)infoBuf.returnKind; } #endif diff --git a/src/coreclr/pal/src/exception/remote-unwind.cpp b/src/coreclr/pal/src/exception/remote-unwind.cpp index 92c07660b3803f..67ab6c644389d0 100644 --- a/src/coreclr/pal/src/exception/remote-unwind.cpp +++ b/src/coreclr/pal/src/exception/remote-unwind.cpp @@ -114,7 +114,7 @@ typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size #define PRId PRId32 #define PRIA "08" #define PRIxA PRIA PRIx -#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64) +#elif defined(TARGET_64BIT) #define PRIx PRIx64 #define PRIu PRIu64 #define PRId PRId64 diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index d8e5805976a8da..6b28d602de7465 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2373,7 +2373,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize) { LIMITED_METHOD_CONTRACT; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) // // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce // chance that we won't be able allocate jump stub because of lack of suitable address space. 
@@ -2425,7 +2425,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap bool fAllocatedFromEmergencyJumpStubReserve = false; size_t allocationSize = pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(initialRequestSize); -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) if (!pInfo->IsInterpreted()) { allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE); @@ -2485,7 +2485,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap // this first allocation is critical as it sets up correctly the loader heap info HeapList *pHp = new HeapList; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) if (pInfo->IsInterpreted()) { pHp->CLRPersonalityRoutine = NULL; @@ -2655,7 +2655,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe size_t reserveSize = initialRequestSize; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) if (!pInfo->IsInterpreted()) { reserveSize += JUMP_ALLOCATE_SIZE; diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index e7795c9c6ae12a..a169f0275777e6 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -534,13 +534,13 @@ struct HeapList size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block PTR_LoaderAllocator pLoaderAllocator; // LoaderAllocator of HeapList -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) BYTE* CLRPersonalityRoutine; // jump thunk to personality routine, NULL if there is no personality routine (e.g. interpreter code heap) #endif TADDR GetModuleBase() { -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) return (CLRPersonalityRoutine != NULL) ? 
(TADDR)CLRPersonalityRoutine : (TADDR)mapBase; #else return (TADDR)mapBase; diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index 2fc4698f2b3b4e..e656e4bf49b89c 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -448,7 +448,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) else #endif // FEATURE_INTERPRETER { -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) +#if defined(TARGET_64BIT) pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0); if (pTracker == NULL) From b321925d17910afa03db1506eab4687ac2e104a0 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Sun, 27 Apr 2025 11:28:27 +0000 Subject: [PATCH 02/22] Fix build --- src/coreclr/jit/lclvars.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 91195a6f57f10c..5b6bc6bfbe6a6f 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -6739,7 +6739,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo offset += codeGen->genCallerSPtoInitialSPdelta(); } -#ifdef FEATURE_ON_STACK_REPLACEMENT +#ifdef TARGET_64BIT if (forRootFrame && opts.IsOSR()) { const PatchpointInfo* const ppInfo = info.compPatchpointInfo; From 52b2217c26ed83c6e2869da014f7367cf4f8ad3b Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 13:59:56 +0000 Subject: [PATCH 03/22] Address CR feedback --- src/coreclr/clrdefinitions.cmake | 3 --- src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/target.h | 4 ++++ 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 695c275492dc63..88353280c191c3 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -181,9 +181,6 @@ if (NOT CLR_CMAKE_HOST_ANDROID) endif(NOT CLR_CMAKE_HOST_ANDROID) add_definitions(-DFEATURE_SYMDIFF) add_compile_definitions(FEATURE_TIERED_COMPILATION) -if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64) - add_compile_definitions(FEATURE_ON_STACK_REPLACEMENT) -endif (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64) add_compile_definitions(FEATURE_PGO) if (CLR_CMAKE_TARGET_ARCH_AMD64) # Enable the AMD64 Unix struct passing JIT-EE interface for all AMD64 platforms, to enable altjit. 
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 5b6bc6bfbe6a6f..91195a6f57f10c 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -6739,7 +6739,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo offset += codeGen->genCallerSPtoInitialSPdelta(); } -#ifdef TARGET_64BIT +#ifdef FEATURE_ON_STACK_REPLACEMENT if (forRootFrame && opts.IsOSR()) { const PatchpointInfo* const ppInfo = info.compPatchpointInfo; diff --git a/src/coreclr/jit/target.h b/src/coreclr/jit/target.h index b8baab7d1825e9..ac053c501b9cbc 100644 --- a/src/coreclr/jit/target.h +++ b/src/coreclr/jit/target.h @@ -9,6 +9,10 @@ #define FEATURE_CFI_SUPPORT #endif +#ifdef TARGET_64BIT +#define FEATURE_ON_STACK_REPLACEMENT +#endif + // Undefine all of the target OS macros // Within the JIT codebase we use the TargetOS features #ifdef TARGET_UNIX From 06fa6232d2a23d97202a7dd33a5d182f162738e9 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 14:05:40 +0000 Subject: [PATCH 04/22] Use #else for most common cases --- src/coreclr/jit/lclvars.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 91195a6f57f10c..5625adf1cc9d1e 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -6757,9 +6757,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo // is simply TotalFrameSize plus one register. // const int adjustment = ppInfo->TotalFrameSize() + REGSIZE_BYTES; - -#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) - +#else const int adjustment = ppInfo->TotalFrameSize(); #endif From 0b0413f7d888ebbce034a387beac558ff7ecfcfb Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 14:12:09 +0000 Subject: [PATCH 05/22] Move to switches.h to share between vm and jit --- src/coreclr/inc/switches.h | 2 ++ src/coreclr/jit/target.h | 4 ---- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/coreclr/inc/switches.h b/src/coreclr/inc/switches.h index 2b839d45884721..df9f6afa32702c 100644 --- a/src/coreclr/inc/switches.h +++ b/src/coreclr/inc/switches.h @@ -48,6 +48,8 @@ #elif defined(TARGET_64BIT) +#define FEATURE_ON_STACK_REPLACEMENT + #if defined(HOST_UNIX) // In PAL we have a smechanism that reserves memory on start up that is // close to libcoreclr and intercepts calls to VirtualAlloc to serve back diff --git a/src/coreclr/jit/target.h b/src/coreclr/jit/target.h index ac053c501b9cbc..b8baab7d1825e9 100644 --- a/src/coreclr/jit/target.h +++ b/src/coreclr/jit/target.h @@ -9,10 +9,6 @@ #define FEATURE_CFI_SUPPORT #endif -#ifdef TARGET_64BIT -#define FEATURE_ON_STACK_REPLACEMENT -#endif - // Undefine all of the target OS macros // Within the JIT codebase we use the TargetOS features #ifdef TARGET_UNIX From e90b2e3f7b659342d94e4a39fdb9fddc47611303 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 23:13:17 +0300 Subject: [PATCH 06/22] Feedback --- src/coreclr/vm/dynamicmethod.cpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index e656e4bf49b89c..070405e86f05de 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -1,9 +1,5 @@ // Licensed to the .NET Foundation under one or more agreements. 
// The .NET Foundation licenses this file to you under the MIT license. -// - -// - #include "common.h" #include "dynamicmethod.h" @@ -18,7 +14,6 @@ #include "CachedInterfaceDispatchPal.h" #include "CachedInterfaceDispatch.h" - #ifndef DACCESS_COMPILE // get the method table for dynamic methods @@ -404,7 +399,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) // Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) +#if defined(TARGET_64BIT) || defined(TARGET_WINDOWS) ReserveBlockSize += JUMP_ALLOCATE_SIZE; #endif @@ -448,7 +443,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) else #endif // FEATURE_INTERPRETER { -#if defined(TARGET_64BIT) +#if defined(TARGET_64BIT) || defined(TARGET_WINDOWS) pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0); if (pTracker == NULL) From ab1788d960987392edb2ef34eaecded156321106 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 23:16:17 +0300 Subject: [PATCH 07/22] Apply suggestions from code review --- src/coreclr/jit/codegencommon.cpp | 2 +- src/coreclr/jit/importercalls.cpp | 2 +- src/coreclr/vm/codeman.cpp | 8 ++++---- src/coreclr/vm/codeman.h | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index d77aae43bc683e..459cf0772db574 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -2107,7 +2107,7 @@ void CodeGen::genEmitMachineCode() bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ? -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT trackedStackPtrsContig = false; #elif defined(TARGET_ARM) // On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index c7f0f90ec10400..63401e011f19d2 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -9121,7 +9121,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni return true; } -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT // Jit64 compat: if (callerRetType == TYP_VOID) { diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 6b28d602de7465..fe9c27b0ef18e8 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2373,7 +2373,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize) { LIMITED_METHOD_CONTRACT; -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT // // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce // chance that we won't be able allocate jump stub because of lack of suitable address space. 
@@ -2425,7 +2425,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap bool fAllocatedFromEmergencyJumpStubReserve = false; size_t allocationSize = pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(initialRequestSize); -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT if (!pInfo->IsInterpreted()) { allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE); @@ -2485,7 +2485,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap // this first allocation is critical as it sets up correctly the loader heap info HeapList *pHp = new HeapList; -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT if (pInfo->IsInterpreted()) { pHp->CLRPersonalityRoutine = NULL; @@ -2655,7 +2655,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe size_t reserveSize = initialRequestSize; -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT if (!pInfo->IsInterpreted()) { reserveSize += JUMP_ALLOCATE_SIZE; diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index a169f0275777e6..f89787cea14443 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -534,13 +534,13 @@ struct HeapList size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block PTR_LoaderAllocator pLoaderAllocator; // LoaderAllocator of HeapList -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT BYTE* CLRPersonalityRoutine; // jump thunk to personality routine, NULL if there is no personality routine (e.g. interpreter code heap) #endif TADDR GetModuleBase() { -#if defined(TARGET_64BIT) +#ifdef TARGET_64BIT return (CLRPersonalityRoutine != NULL) ? (TADDR)CLRPersonalityRoutine : (TADDR)mapBase; #else return (TADDR)mapBase; From 91c33f2f6074e2764d2b6f910e4ee40e681e6e7e Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 23:27:13 +0300 Subject: [PATCH 08/22] Update src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp Co-authored-by: Jan Kotas --- src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp index 8a327a7602ba34..a2debab5499ee6 100644 --- a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp +++ b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp @@ -970,6 +970,7 @@ GCRefKind CoffNativeCodeManager::GetReturnValueKind(MethodInfo * pMethodInfo, hdrInfo infoBuf; size_t infoSize = DecodeGCHdrInfo(GCInfoToken(gcInfo), codeOffset, &infoBuf); + ASSERT(infoBuf.returnKind != RT_Float); // See TODO above return (GCRefKind)infoBuf.returnKind; } #endif From 50a93c87b1bbc8a09f4e2210bbff1d4703727e74 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 13:29:33 -0700 Subject: [PATCH 09/22] Apply suggestions from code review --- src/coreclr/vm/dynamicmethod.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index 070405e86f05de..4ac7dbcab7975f 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -399,7 +399,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) // Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100; -#if defined(TARGET_64BIT) || 
defined(TARGET_WINDOWS) +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) ReserveBlockSize += JUMP_ALLOCATE_SIZE; #endif @@ -443,7 +443,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) else #endif // FEATURE_INTERPRETER { -#if defined(TARGET_64BIT) || defined(TARGET_WINDOWS) +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0); if (pTracker == NULL) From 1f3edb4ad9061454af9b369678278f7369b461e1 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 23:31:45 +0300 Subject: [PATCH 10/22] Update dynamicmethod.cpp --- src/coreclr/vm/dynamicmethod.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index 4ac7dbcab7975f..b42d3249e7566d 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -476,7 +476,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) pHp->maxCodeHeapSize = m_TotalBytesAvailable - (pTracker ? pTracker->size : 0); pHp->reserveForJumpStubs = 0; -#ifdef HOST_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) if (pHp->CLRPersonalityRoutine != NULL) { ExecutableWriterHolder personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12); From d91dc3446c838595ca42d3cbf57d3c1052ae4f9d Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 23:41:19 +0300 Subject: [PATCH 11/22] Apply suggestions from code review Co-authored-by: Jan Kotas --- src/coreclr/vm/codeman.cpp | 6 +++--- src/coreclr/vm/codeman.h | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index fe9c27b0ef18e8..8c424b4d147cef 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2425,7 +2425,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap bool fAllocatedFromEmergencyJumpStubReserve = false; size_t allocationSize = pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(initialRequestSize); -#ifdef TARGET_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) if (!pInfo->IsInterpreted()) { allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE); @@ -2485,7 +2485,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap // this first allocation is critical as it sets up correctly the loader heap info HeapList *pHp = new HeapList; -#ifdef TARGET_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) if (pInfo->IsInterpreted()) { pHp->CLRPersonalityRoutine = NULL; @@ -2655,7 +2655,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe size_t reserveSize = initialRequestSize; -#ifdef TARGET_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) if (!pInfo->IsInterpreted()) { reserveSize += JUMP_ALLOCATE_SIZE; diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index f89787cea14443..a03a28e543179b 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -534,13 +534,13 @@ struct HeapList size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block PTR_LoaderAllocator pLoaderAllocator; // LoaderAllocator of HeapList -#ifdef TARGET_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) BYTE* CLRPersonalityRoutine; // jump thunk to personality routine, NULL if there is no personality routine (e.g. 
interpreter code heap) #endif TADDR GetModuleBase() { -#ifdef TARGET_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) return (CLRPersonalityRoutine != NULL) ? (TADDR)CLRPersonalityRoutine : (TADDR)mapBase; #else return (TADDR)mapBase; From 6a1013a3137bf62d7ce9b9e714aa994bc3d2cad3 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Mon, 28 Apr 2025 23:42:21 +0300 Subject: [PATCH 12/22] Update fntableaccess.h --- src/coreclr/debug/daccess/fntableaccess.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coreclr/debug/daccess/fntableaccess.h b/src/coreclr/debug/daccess/fntableaccess.h index 722f0581e218a0..988e220c5225f1 100644 --- a/src/coreclr/debug/daccess/fntableaccess.h +++ b/src/coreclr/debug/daccess/fntableaccess.h @@ -30,13 +30,13 @@ struct FakeHeapList size_t maxCodeHeapSize; size_t reserveForJumpStubs; DWORD_PTR pLoaderAllocator; -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) DWORD_PTR CLRPersonalityRoutine; #endif DWORD_PTR GetModuleBase() { -#if defined(TARGET_AMD64) || defined(TARGET_ARM64) +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) return CLRPersonalityRoutine; #else return mapBase; From acfbf7c8fd56c4631dd905ea88dfbd82b140827e Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 14:07:09 -0700 Subject: [PATCH 13/22] Update src/coreclr/inc/switches.h --- src/coreclr/inc/switches.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/inc/switches.h b/src/coreclr/inc/switches.h index df9f6afa32702c..f0c5eaab533bb1 100644 --- a/src/coreclr/inc/switches.h +++ b/src/coreclr/inc/switches.h @@ -51,7 +51,7 @@ #define FEATURE_ON_STACK_REPLACEMENT #if defined(HOST_UNIX) - // In PAL we have a smechanism that reserves memory on start up that is + // In PAL we have a mechanism that reserves memory on start up that is // close to libcoreclr and intercepts calls to VirtualAlloc to serve back // from this area. 
#define USE_LAZY_PREFERRED_RANGE 0 From 878060b3ddf9c17f651879d9a50bdf419f282a76 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 14:07:36 -0700 Subject: [PATCH 14/22] Update src/coreclr/jit/jitconfigvalues.h --- src/coreclr/jit/jitconfigvalues.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index e51d75b17a08b8..c353f336f0ff4d 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -708,7 +708,7 @@ CONFIG_INTEGER(JitRandomGuardedDevirtualization, "JitRandomGuardedDevirtualizati RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 1) #else RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 0) -#endif // defined(FEATURE_ON_STACK_REPLACEMENT) +#endif // FEATURE_ON_STACK_REPLACEMENT // Initial patchpoint counter value used by jitted code RELEASE_CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, "TC_OnStackReplacement_InitialCounter", 1000) From 9356e47ef72ae0a8c362f7de47b752ba25d3d608 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Tue, 29 Apr 2025 02:35:45 +0300 Subject: [PATCH 15/22] Update dynamicmethod.cpp --- src/coreclr/vm/dynamicmethod.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index 7c8a310998c124..40ea4a6c3e2c42 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -435,6 +435,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) TrackAllocation *pTracker = NULL; +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) #ifdef FEATURE_INTERPRETER if (pInfo->IsInterpreted()) { @@ -443,8 +444,6 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) else #endif // FEATURE_INTERPRETER { -#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) - pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0); if (pTracker == NULL) { @@ -455,9 +454,8 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) } pHp->CLRPersonalityRoutine = (BYTE *)(pTracker + 1); - -#endif } +#endif pHp->hpNext = NULL; pHp->pHeap = (PTR_CodeHeap)this; From 04e93c4555c55b8a351dd41a37e3e032330c641d Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Tue, 29 Apr 2025 00:11:34 +0000 Subject: [PATCH 16/22] Add more checks --- src/coreclr/vm/codeman.cpp | 2 +- src/coreclr/vm/codeman.h | 2 +- src/coreclr/vm/jitinterface.cpp | 36 +++++---------------------------- 3 files changed, 7 insertions(+), 33 deletions(-) diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 8c424b4d147cef..bdd91baf3ede35 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2526,7 +2526,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to next lower page align size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize)); pHp->pHdrMap = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize)); -#ifdef TARGET_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) if (pHp->CLRPersonalityRoutine != NULL) { ExecutableWriterHolder personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12); diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index a03a28e543179b..a531ff1e42d878 100644 --- a/src/coreclr/vm/codeman.h +++ 
b/src/coreclr/vm/codeman.h @@ -2281,7 +2281,7 @@ class ExecutionManager BOOL Acquired(); }; -#ifdef TARGET_64BIT +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) static ULONG GetCLRPersonalityRoutineValue() { LIMITED_METHOD_CONTRACT; diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index eac05399f5bdf7..347e3c8d6843a3 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11608,45 +11608,19 @@ void CEEJitInfo::allocUnwindInfo ( memcpy(pUnwindInfoRW, pUnwindBlock, unwindSize); -#if defined(TARGET_X86) - - // Do NOTHING - -#elif defined(TARGET_AMD64) - +#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) +#ifdef TARGET_AMD64 pUnwindInfoRW->Flags = UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER; ULONG * pPersonalityRoutineRW = (ULONG*)ALIGN_UP(&(pUnwindInfoRW->UnwindCode[pUnwindInfoRW->CountOfUnwindCodes]), sizeof(ULONG)); *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue(); - -#elif defined(TARGET_ARM64) - +#else // TARGET_AMD64 *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG))); *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue(); - -#elif defined(TARGET_ARM) - - *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit - - ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG))); - *pPersonalityRoutineRW = (TADDR)ProcessCLRException - baseAddress; - -#elif defined(TARGET_LOONGARCH64) - - *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit - - ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG))); - *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue(); - -#elif defined(TARGET_RISCV64) - *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit - - ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG))); - *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue(); - -#endif +#endif // TARGET_AMD64 +#endif // TARGET_64BIT && TARGET_WINDOWS EE_TO_JIT_TRANSITION(); #else // FEATURE_EH_FUNCLETS From 3188bad63e16c39269d1317f601761242d3d6356 Mon Sep 17 00:00:00 2001 From: Adeel Mujahid <3840695+am11@users.noreply.github.com> Date: Tue, 29 Apr 2025 00:22:58 +0000 Subject: [PATCH 17/22] Address CR fb --- src/coreclr/vm/jitinterface.cpp | 34 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 347e3c8d6843a3..2e0c09a84f1487 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11382,11 +11382,10 @@ void CEECodeGenInfo::CompressDebugInfo(PCODE nativeEntry) EE_TO_JIT_TRANSITION(); } +#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) void reservePersonalityRoutineSpace(uint32_t &unwindSize) { -#if defined(TARGET_X86) - // Do nothing -#elif defined(TARGET_AMD64) +#if defined(TARGET_AMD64) // Add space for personality routine, it must be 4-byte aligned. // Everything in the UNWIND_INFO up to the variable-sized UnwindCodes // array has already had its size included in unwindSize by the caller. @@ -11398,29 +11397,16 @@ void reservePersonalityRoutineSpace(uint32_t &unwindSize) _ASSERTE(FitsInU4(unwindSize + sizeof(ULONG))); unwindSize = (ULONG)(ALIGN_UP(unwindSize, sizeof(ULONG))); -#elif defined(TARGET_ARM) || defined(TARGET_ARM64) - // The JIT passes in a 4-byte aligned block of unwind data. 
- _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG))); - - // Add space for personality routine, it must be 4-byte aligned. - unwindSize += sizeof(ULONG); -#elif defined(TARGET_LOONGARCH64) - // The JIT passes in a 4-byte aligned block of unwind data. - _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG))); - - // Add space for personality routine, it must be 4-byte aligned. - unwindSize += sizeof(ULONG); -#elif defined(TARGET_RISCV64) +#else // TARGET_AMD64 // The JIT passes in a 4-byte aligned block of unwind data. _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG))); // Add space for personality routine, it must be 4-byte aligned. unwindSize += sizeof(ULONG); -#else - PORTABILITY_ASSERT("reservePersonalityRoutineSpace"); -#endif // !defined(TARGET_AMD64) - +#endif // TARGET_AMD64 } +#endif // !TARGET_X86 && TARGET_WINDOWS + // Reserve memory for the method/funclet's unwind information. // Note that this must be called before allocMem. It should be // called once for the main method, once for every funclet, and @@ -11452,7 +11438,9 @@ void CEEJitInfo::reserveUnwindInfo(bool isFunclet, bool isColdCode, uint32_t unw uint32_t currentSize = unwindSize; +#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) reservePersonalityRoutineSpace(currentSize); +#endif m_totalUnwindSize += currentSize; @@ -11543,7 +11531,9 @@ void CEEJitInfo::allocUnwindInfo ( m_usedUnwindSize += unwindSize; +#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) reservePersonalityRoutineSpace(m_usedUnwindSize); +#endif _ASSERTE(m_usedUnwindSize <= m_totalUnwindSize); @@ -11608,7 +11598,7 @@ void CEEJitInfo::allocUnwindInfo ( memcpy(pUnwindInfoRW, pUnwindBlock, unwindSize); -#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS) +#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) #ifdef TARGET_AMD64 pUnwindInfoRW->Flags = UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER; @@ -11620,7 +11610,7 @@ void CEEJitInfo::allocUnwindInfo ( ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG))); *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue(); #endif // TARGET_AMD64 -#endif // TARGET_64BIT && TARGET_WINDOWS +#endif // !TARGET_X86 && TARGET_WINDOWS EE_TO_JIT_TRANSITION(); #else // FEATURE_EH_FUNCLETS From b7ca77659c57fd3f855c46ff759361aeb782024e Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 21:03:41 -0700 Subject: [PATCH 18/22] Apply suggestions from code review --- src/coreclr/vm/jitinterface.cpp | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 2e0c09a84f1487..08e4ae7c0defe6 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11382,13 +11382,9 @@ void CEECodeGenInfo::CompressDebugInfo(PCODE nativeEntry) EE_TO_JIT_TRANSITION(); } -#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) void reservePersonalityRoutineSpace(uint32_t &unwindSize) { #if defined(TARGET_AMD64) - // Add space for personality routine, it must be 4-byte aligned. - // Everything in the UNWIND_INFO up to the variable-sized UnwindCodes - // array has already had its size included in unwindSize by the caller. unwindSize += sizeof(ULONG); // Note that the count of unwind codes (2 bytes each) is stored as a UBYTE @@ -11399,13 +11395,14 @@ void reservePersonalityRoutineSpace(uint32_t &unwindSize) unwindSize = (ULONG)(ALIGN_UP(unwindSize, sizeof(ULONG))); #else // TARGET_AMD64 // The JIT passes in a 4-byte aligned block of unwind data. 
+ // Non-zero low bits would mean a compact encoding. _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG))); - - // Add space for personality routine, it must be 4-byte aligned. - unwindSize += sizeof(ULONG); #endif // TARGET_AMD64 +#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) + // Add space for personality routine + unwindSize += sizeof(ULONG); +#endif // !TARGET_X86 && TARGET_WINDOWS } -#endif // !TARGET_X86 && TARGET_WINDOWS // Reserve memory for the method/funclet's unwind information. // Note that this must be called before allocMem. It should be @@ -11438,9 +11435,7 @@ void CEEJitInfo::reserveUnwindInfo(bool isFunclet, bool isColdCode, uint32_t unw uint32_t currentSize = unwindSize; -#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) reservePersonalityRoutineSpace(currentSize); -#endif m_totalUnwindSize += currentSize; @@ -11531,9 +11526,7 @@ void CEEJitInfo::allocUnwindInfo ( m_usedUnwindSize += unwindSize; -#if !defined(TARGET_X86) && defined(TARGET_WINDOWS) reservePersonalityRoutineSpace(m_usedUnwindSize); -#endif _ASSERTE(m_usedUnwindSize <= m_totalUnwindSize); From e674e79cda235b6de592ed03a845ae7cf9c28fbd Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 21:04:59 -0700 Subject: [PATCH 19/22] Update src/coreclr/vm/jitinterface.cpp --- src/coreclr/vm/jitinterface.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 08e4ae7c0defe6..3fb4ee09cafae2 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11398,6 +11398,7 @@ void reservePersonalityRoutineSpace(uint32_t &unwindSize) // Non-zero low bits would mean a compact encoding. _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG))); #endif // TARGET_AMD64 + #if !defined(TARGET_X86) && defined(TARGET_WINDOWS) // Add space for personality routine unwindSize += sizeof(ULONG); From 4ab347a2ad25e84f96f0843335c1dd01dc957ef5 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 21:11:21 -0700 Subject: [PATCH 20/22] Update src/coreclr/vm/jitinterface.cpp --- src/coreclr/vm/jitinterface.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 3fb4ee09cafae2..a2e61fc86ad94d 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11395,7 +11395,7 @@ void reservePersonalityRoutineSpace(uint32_t &unwindSize) unwindSize = (ULONG)(ALIGN_UP(unwindSize, sizeof(ULONG))); #else // TARGET_AMD64 // The JIT passes in a 4-byte aligned block of unwind data. - // Non-zero low bits would mean a compact encoding. + // On Arm64, non-zero low bits would mean a compact encoding. 
_ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG))); #endif // TARGET_AMD64 From d31bfed96a15d238036e7ec2bda5ded669bea921 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 21:12:20 -0700 Subject: [PATCH 21/22] Update src/coreclr/vm/jitinterface.cpp --- src/coreclr/vm/jitinterface.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index a2e61fc86ad94d..7f5b1663fd3cb1 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11385,8 +11385,6 @@ void CEECodeGenInfo::CompressDebugInfo(PCODE nativeEntry) void reservePersonalityRoutineSpace(uint32_t &unwindSize) { #if defined(TARGET_AMD64) - unwindSize += sizeof(ULONG); - // Note that the count of unwind codes (2 bytes each) is stored as a UBYTE // So the largest size could be 510 bytes, plus the header and language // specific stuff. This can't overflow. From 5674ac308aa60cf4de70d8ecc33a3d2ed5a34452 Mon Sep 17 00:00:00 2001 From: Jan Kotas Date: Mon, 28 Apr 2025 21:12:57 -0700 Subject: [PATCH 22/22] Update src/coreclr/vm/jitinterface.cpp --- src/coreclr/vm/jitinterface.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index 7f5b1663fd3cb1..737f431d9102dc 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -11388,7 +11388,6 @@ void reservePersonalityRoutineSpace(uint32_t &unwindSize) // Note that the count of unwind codes (2 bytes each) is stored as a UBYTE // So the largest size could be 510 bytes, plus the header and language // specific stuff. This can't overflow. - _ASSERTE(FitsInU4(unwindSize + sizeof(ULONG))); unwindSize = (ULONG)(ALIGN_UP(unwindSize, sizeof(ULONG))); #else // TARGET_AMD64