diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake
index b953dac5d862a2..efb6ab0738a1a5 100644
--- a/src/coreclr/clrdefinitions.cmake
+++ b/src/coreclr/clrdefinitions.cmake
@@ -183,9 +183,6 @@ if (NOT CLR_CMAKE_HOST_ANDROID)
 endif(NOT CLR_CMAKE_HOST_ANDROID)
 add_definitions(-DFEATURE_SYMDIFF)
 add_compile_definitions(FEATURE_TIERED_COMPILATION)
-if (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64)
-  add_compile_definitions(FEATURE_ON_STACK_REPLACEMENT)
-endif (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_LOONGARCH64 OR CLR_CMAKE_TARGET_ARCH_RISCV64)
 add_compile_definitions(FEATURE_PGO)
 if (CLR_CMAKE_TARGET_ARCH_AMD64)
   # Enable the AMD64 Unix struct passing JIT-EE interface for all AMD64 platforms, to enable altjit.
diff --git a/src/coreclr/debug/daccess/fntableaccess.h b/src/coreclr/debug/daccess/fntableaccess.h
index 722f0581e218a0..988e220c5225f1 100644
--- a/src/coreclr/debug/daccess/fntableaccess.h
+++ b/src/coreclr/debug/daccess/fntableaccess.h
@@ -30,13 +30,13 @@ struct FakeHeapList
     size_t              maxCodeHeapSize;
     size_t              reserveForJumpStubs;
     DWORD_PTR           pLoaderAllocator;
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     DWORD_PTR           CLRPersonalityRoutine;
 #endif
 
     DWORD_PTR GetModuleBase()
     {
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
         return CLRPersonalityRoutine;
 #else
         return mapBase;
diff --git a/src/coreclr/inc/clrconfigvalues.h b/src/coreclr/inc/clrconfigvalues.h
index 8aee6fea67127e..378f4c8b0948be 100644
--- a/src/coreclr/inc/clrconfigvalues.h
+++ b/src/coreclr/inc/clrconfigvalues.h
@@ -487,11 +487,11 @@ RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_ProcessorCount, W("PROCESSOR_COUNT"), 0, "S
 #endif // _DEBUG
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TieredCompilation, W("TieredCompilation"), 1, "Enables tiered compilation")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_QuickJit, W("TC_QuickJit"), 1, "For methods that would be jitted, enable using quick JIT when appropriate.")
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 1, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
-#else // !(defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)) || defined(TARGET_RISCV64)
+#else // FEATURE_ON_STACK_REPLACEMENT
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 0, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
-#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#endif // FEATURE_ON_STACK_REPLACEMENT
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_AggressiveTiering, W("TC_AggressiveTiering"), 0, "Transition through tiers aggressively.")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountThreshold, W("TC_CallCountThreshold"), TC_CallCountThreshold, "Number of times a method must be called in tier 0 after which it is promoted to the next tier.")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountingDelayMs, W("TC_CallCountingDelayMs"), TC_CallCountingDelayMs, "A perpetual delay in milliseconds that is applied to call counting in tier 0 and jitting at higher tiers, while there is startup-like activity.")
diff --git a/src/coreclr/inc/gcinfotypes.h b/src/coreclr/inc/gcinfotypes.h
index d83fe0a0086666..0218a49853a1cd 100644
--- a/src/coreclr/inc/gcinfotypes.h
+++ b/src/coreclr/inc/gcinfotypes.h
@@ -113,6 +113,8 @@ struct GcStackSlot
     }
 };
 
+// ReturnKind is not encoded in GCInfo v4 and later, except on x86.
+
 //--------------------------------------------------------------------------------
 // ReturnKind -- encoding return type information in GcInfo
 //
@@ -137,61 +139,6 @@ struct GcStackSlot
 //
 //--------------------------------------------------------------------------------
 
-// RT_Unset: An intermediate step for staged bringup.
-// When ReturnKind is RT_Unset, it means that the JIT did not set
-// the ReturnKind in the GCInfo, and therefore the VM cannot rely on it,
-// and must use other mechanisms (similar to GcInfo ver 1) to determine
-// the Return type's GC information.
-//
-// RT_Unset is only used in the following situations:
-// X64: Used by JIT64 until updated to use GcInfo v2 API
-// ARM: Used by JIT32 until updated to use GcInfo v2 API
-//
-// RT_Unset should have a valid encoding, whose bits are actually stored in the image.
-// For X86, there are no free bits, and there's no RT_Unused enumeration.
-
-#if defined(TARGET_X86)
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Float
-
-#elif defined(TARGET_ARM)
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Unset
-
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
-
-// Slim Header:
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Unset
-
-// Fat Header:
-
-// 0000 RT_Scalar
-// 0001 RT_Object
-// 0010 RT_ByRef
-// 0011 RT_Unset
-// 0100 RT_Scalar_Obj
-// 1000 RT_Scalar_ByRef
-// 0101 RT_Obj_Obj
-// 1001 RT_Obj_ByRef
-// 0110 RT_ByRef_Obj
-// 1010 RT_ByRef_ByRef
-
-#else
-#ifdef PORTABILITY_WARNING
-PORTABILITY_WARNING("Need ReturnKind for new Platform")
-#endif // PORTABILITY_WARNING
-#endif // Target checks
-
 enum ReturnKind {
 
     // Cases for Return in one register
@@ -1026,4 +973,3 @@ struct InterpreterGcInfoEncoding {
 #endif // debug_instrumented_return
 
 #endif // !__GCINFOTYPES_H__
-
diff --git a/src/coreclr/inc/switches.h b/src/coreclr/inc/switches.h
index 01d626407f47ef..f0c5eaab533bb1 100644
--- a/src/coreclr/inc/switches.h
+++ b/src/coreclr/inc/switches.h
@@ -46,10 +46,12 @@
 #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_BROWSER)
 #define USE_LAZY_PREFERRED_RANGE 0
 
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_64BIT)
+
+#define FEATURE_ON_STACK_REPLACEMENT
 
 #if defined(HOST_UNIX)
-    // In PAL we have a smechanism that reserves memory on start up that is
+    // In PAL we have a mechanism that reserves memory on start up that is
     // close to libcoreclr and intercepts calls to VirtualAlloc to serve back
     // from this area.
     #define USE_LAZY_PREFERRED_RANGE 0
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 6cda78bbaed410..459cf0772db574 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -2107,7 +2107,7 @@ void CodeGen::genEmitMachineCode()
 
     bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ?
 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef TARGET_64BIT
     trackedStackPtrsContig = false;
 #elif defined(TARGET_ARM)
     // On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index ae3989f2f2d22c..63401e011f19d2 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -9121,7 +9121,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni
         return true;
     }
 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef TARGET_64BIT
     // Jit64 compat:
     if (callerRetType == TYP_VOID)
     {
@@ -9151,7 +9151,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni
     {
         return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
     }
-#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64
+#endif // TARGET_64BIT
 
     return false;
 }
diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h
index 2c9231cb47eb24..c353f336f0ff4d 100644
--- a/src/coreclr/jit/jitconfigvalues.h
+++ b/src/coreclr/jit/jitconfigvalues.h
@@ -704,11 +704,11 @@ CONFIG_STRING(JitGuardedDevirtualizationRange, "JitGuardedDevirtualizationRange"
 CONFIG_INTEGER(JitRandomGuardedDevirtualization, "JitRandomGuardedDevirtualization", 0)
 
 // Enable insertion of patchpoints into Tier0 methods, switching to optimized where needed.
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
 RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 1)
 #else
 RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 0)
-#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#endif // FEATURE_ON_STACK_REPLACEMENT
 
 // Initial patchpoint counter value used by jitted code
 RELEASE_CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, "TC_OnStackReplacement_InitialCounter", 1000)
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 889147dc17c816..5625adf1cc9d1e 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -6739,7 +6739,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo
         offset += codeGen->genCallerSPtoInitialSPdelta();
     }
 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
     if (forRootFrame && opts.IsOSR())
     {
         const PatchpointInfo* const ppInfo = info.compPatchpointInfo;
@@ -6757,9 +6757,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo
         // is simply TotalFrameSize plus one register.
         //
         const int adjustment = ppInfo->TotalFrameSize() + REGSIZE_BYTES;
-
-#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
-
+#else
         const int adjustment = ppInfo->TotalFrameSize();
 #endif
 
diff --git a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp
index 96d7eb5f22c51e..9ac748ecddd5ea 100644
--- a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp
+++ b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp
@@ -1143,15 +1143,6 @@ int UnixNativeCodeManager::TrailingEpilogueInstructionsCount(MethodInfo * pMetho
     return 0;
 }
 
-// Convert the return kind that was encoded by RyuJIT to the
-// enum used by the runtime.
-GCRefKind GetGcRefKind(ReturnKind returnKind)
-{
-    ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef));
-
-    return (GCRefKind)returnKind;
-}
-
 bool UnixNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo *    pMethodInfo,
                                                        REGDISPLAY *    pRegisterSet,       // in
                                                        PTR_PTR_VOID *  ppvRetAddrLocation) // out
diff --git a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp
index 1b95e27caad563..a2debab5499ee6 100644
--- a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp
+++ b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp
@@ -821,19 +821,6 @@ bool CoffNativeCodeManager::IsUnwindable(PTR_VOID pvAddress)
     return true;
 }
 
-// Convert the return kind that was encoded by RyuJIT to the
-// enum used by the runtime.
-GCRefKind GetGcRefKind(ReturnKind returnKind)
-{
-#ifdef TARGET_ARM64
-    ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef));
-#else
-    ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef));
-#endif
-
-    return (GCRefKind)returnKind;
-}
-
 bool CoffNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo *    pMethodInfo,
                                                        REGDISPLAY *    pRegisterSet,       // in
                                                        PTR_PTR_VOID *  ppvRetAddrLocation) // out
@@ -983,7 +970,8 @@ GCRefKind CoffNativeCodeManager::GetReturnValueKind(MethodInfo * pMethodInfo,
     hdrInfo infoBuf;
     size_t infoSize = DecodeGCHdrInfo(GCInfoToken(gcInfo), codeOffset, &infoBuf);
 
-    return GetGcRefKind(infoBuf.returnKind);
+    ASSERT(infoBuf.returnKind != RT_Float); // See TODO above
+    return (GCRefKind)infoBuf.returnKind;
 }
 
 #endif
diff --git a/src/coreclr/pal/src/exception/remote-unwind.cpp b/src/coreclr/pal/src/exception/remote-unwind.cpp
index 92c07660b3803f..67ab6c644389d0 100644
--- a/src/coreclr/pal/src/exception/remote-unwind.cpp
+++ b/src/coreclr/pal/src/exception/remote-unwind.cpp
@@ -114,7 +114,7 @@ typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size
 #define PRId PRId32
 #define PRIA "08"
 #define PRIxA PRIA PRIx
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_64BIT)
 #define PRIx PRIx64
 #define PRIu PRIu64
 #define PRId PRId64
diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp
index d8e5805976a8da..bdd91baf3ede35 100644
--- a/src/coreclr/vm/codeman.cpp
+++ b/src/coreclr/vm/codeman.cpp
@@ -2373,7 +2373,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize)
 {
     LIMITED_METHOD_CONTRACT;
 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef TARGET_64BIT
     //
     // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce
     // chance that we won't be able allocate jump stub because of lack of suitable address space.
@@ -2425,7 +2425,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
     bool fAllocatedFromEmergencyJumpStubReserve = false;
 
     size_t allocationSize = pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(initialRequestSize);
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     if (!pInfo->IsInterpreted())
     {
         allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE);
@@ -2485,7 +2485,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
     // this first allocation is critical as it sets up correctly the loader heap info
     HeapList *pHp = new HeapList;
 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     if (pInfo->IsInterpreted())
     {
         pHp->CLRPersonalityRoutine = NULL;
@@ -2526,7 +2526,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
     pHp->mapBase = ROUND_DOWN_TO_PAGE(pHp->startAddress); // round down to next lower page align
     size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize));
     pHp->pHdrMap = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize));
-#ifdef TARGET_64BIT
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     if (pHp->CLRPersonalityRoutine != NULL)
     {
         ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12);
@@ -2655,7 +2655,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe
 
     size_t reserveSize = initialRequestSize;
 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     if (!pInfo->IsInterpreted())
     {
         reserveSize += JUMP_ALLOCATE_SIZE;
diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h
index e7795c9c6ae12a..a531ff1e42d878 100644
--- a/src/coreclr/vm/codeman.h
+++ b/src/coreclr/vm/codeman.h
@@ -534,13 +534,13 @@ struct HeapList
     size_t              reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block
 
     PTR_LoaderAllocator pLoaderAllocator;    // LoaderAllocator of HeapList
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     BYTE*               CLRPersonalityRoutine; // jump thunk to personality routine, NULL if there is no personality routine (e.g. interpreter code heap)
 #endif
 
     TADDR GetModuleBase()
     {
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
         return (CLRPersonalityRoutine != NULL) ? (TADDR)CLRPersonalityRoutine : (TADDR)mapBase;
 #else
         return (TADDR)mapBase;
@@ -2281,7 +2281,7 @@ class ExecutionManager
         BOOL Acquired();
     };
 
-#ifdef TARGET_64BIT
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     static ULONG GetCLRPersonalityRoutineValue()
     {
         LIMITED_METHOD_CONTRACT;
diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp
index 4b9df31b3e21e7..40ea4a6c3e2c42 100644
--- a/src/coreclr/vm/dynamicmethod.cpp
+++ b/src/coreclr/vm/dynamicmethod.cpp
@@ -1,9 +1,5 @@
 // Licensed to the .NET Foundation under one or more agreements.
 // The .NET Foundation licenses this file to you under the MIT license.
-//
-
-//
-
 #include "common.h"
 
 #include "dynamicmethod.h"
@@ -18,7 +14,6 @@
 #include "CachedInterfaceDispatchPal.h"
 #include "CachedInterfaceDispatch.h"
 
-
 #ifndef DACCESS_COMPILE
 
 // get the method table for dynamic methods
@@ -404,7 +399,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
     // Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation
    ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100;
 
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64)
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     ReserveBlockSize += JUMP_ALLOCATE_SIZE;
 #endif
 
@@ -440,6 +435,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
 
     TrackAllocation *pTracker = NULL;
 
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
 #ifdef FEATURE_INTERPRETER
     if (pInfo->IsInterpreted())
     {
@@ -448,8 +444,6 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
     else
 #endif // FEATURE_INTERPRETER
     {
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
-
         pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0);
         if (pTracker == NULL)
         {
@@ -460,9 +454,8 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
         }
 
         pHp->CLRPersonalityRoutine = (BYTE *)(pTracker + 1);
-
-#endif
     }
+#endif
 
     pHp->hpNext = NULL;
     pHp->pHeap = (PTR_CodeHeap)this;
@@ -481,7 +474,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
     pHp->maxCodeHeapSize = m_TotalBytesAvailable - (pTracker ? pTracker->size : 0);
     pHp->reserveForJumpStubs = 0;
 
-#ifdef HOST_64BIT
+#if defined(TARGET_64BIT) && defined(TARGET_WINDOWS)
     if (pHp->CLRPersonalityRoutine != NULL)
     {
         ExecutableWriterHolder<BYTE> personalityRoutineWriterHolder(pHp->CLRPersonalityRoutine, 12);
diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp
index eac05399f5bdf7..737f431d9102dc 100644
--- a/src/coreclr/vm/jitinterface.cpp
+++ b/src/coreclr/vm/jitinterface.cpp
@@ -11384,43 +11384,24 @@ void CEECodeGenInfo::CompressDebugInfo(PCODE nativeEntry)
 
 void reservePersonalityRoutineSpace(uint32_t &unwindSize)
 {
-#if defined(TARGET_X86)
-    // Do nothing
-#elif defined(TARGET_AMD64)
-    // Add space for personality routine, it must be 4-byte aligned.
-    // Everything in the UNWIND_INFO up to the variable-sized UnwindCodes
-    // array has already had its size included in unwindSize by the caller.
-    unwindSize += sizeof(ULONG);
-
+#if defined(TARGET_AMD64)
     // Note that the count of unwind codes (2 bytes each) is stored as a UBYTE
     // So the largest size could be 510 bytes, plus the header and language
     // specific stuff. This can't overflow.
-
     _ASSERTE(FitsInU4(unwindSize + sizeof(ULONG)));
     unwindSize = (ULONG)(ALIGN_UP(unwindSize, sizeof(ULONG)));
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64)
-    // The JIT passes in a 4-byte aligned block of unwind data.
-    _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG)));
-
-    // Add space for personality routine, it must be 4-byte aligned.
-    unwindSize += sizeof(ULONG);
-#elif defined(TARGET_LOONGARCH64)
-    // The JIT passes in a 4-byte aligned block of unwind data.
-    _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG)));
-
-    // Add space for personality routine, it must be 4-byte aligned.
-    unwindSize += sizeof(ULONG);
-#elif defined(TARGET_RISCV64)
+#else // TARGET_AMD64
     // The JIT passes in a 4-byte aligned block of unwind data.
+    // On Arm64, non-zero low bits would mean a compact encoding.
     _ASSERTE(IS_ALIGNED(unwindSize, sizeof(ULONG)));
+#endif // TARGET_AMD64
 
-    // Add space for personality routine, it must be 4-byte aligned.
+#if !defined(TARGET_X86) && defined(TARGET_WINDOWS)
+    // Add space for personality routine
     unwindSize += sizeof(ULONG);
-#else
-    PORTABILITY_ASSERT("reservePersonalityRoutineSpace");
-#endif // !defined(TARGET_AMD64)
-
+#endif // !TARGET_X86 && TARGET_WINDOWS
 }
+
 // Reserve memory for the method/funclet's unwind information.
 // Note that this must be called before allocMem. It should be
 // called once for the main method, once for every funclet, and
@@ -11608,45 +11589,19 @@ void CEEJitInfo::allocUnwindInfo (
 
     memcpy(pUnwindInfoRW, pUnwindBlock, unwindSize);
 
-#if defined(TARGET_X86)
-
-    // Do NOTHING
-
-#elif defined(TARGET_AMD64)
-
+#if !defined(TARGET_X86) && defined(TARGET_WINDOWS)
+#ifdef TARGET_AMD64
     pUnwindInfoRW->Flags = UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER;
 
     ULONG * pPersonalityRoutineRW = (ULONG*)ALIGN_UP(&(pUnwindInfoRW->UnwindCode[pUnwindInfoRW->CountOfUnwindCodes]), sizeof(ULONG));
     *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue();
-
-#elif defined(TARGET_ARM64)
-
+#else // TARGET_AMD64
     *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit
 
     ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG)));
     *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue();
-
-#elif defined(TARGET_ARM)
-
-    *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit
-
-    ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG)));
-    *pPersonalityRoutineRW = (TADDR)ProcessCLRException - baseAddress;
-
-#elif defined(TARGET_LOONGARCH64)
-
-    *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit
-
-    ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG)));
-    *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue();
-
-#elif defined(TARGET_RISCV64)
-    *(LONG *)pUnwindInfoRW |= (1 << 20); // X bit
-
-    ULONG * pPersonalityRoutineRW = (ULONG*)((BYTE *)pUnwindInfoRW + ALIGN_UP(unwindSize, sizeof(ULONG)));
-    *pPersonalityRoutineRW = ExecutionManager::GetCLRPersonalityRoutineValue();
-
-#endif
+#endif // TARGET_AMD64
+#endif // !TARGET_X86 && TARGET_WINDOWS
 
     EE_TO_JIT_TRANSITION();
 #else // FEATURE_EH_FUNCLETS