
Commit ef4cf96

Simplify 64-bit platform conditions
1 parent 4158fca commit ef4cf96

File tree: 13 files changed (+21, -97 lines)

src/coreclr/inc/clrconfigvalues.h

Lines changed: 3 additions & 3 deletions

@@ -487,11 +487,11 @@ RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_ProcessorCount, W("PROCESSOR_COUNT"), 0, "S
 #endif // _DEBUG
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TieredCompilation, W("TieredCompilation"), 1, "Enables tiered compilation")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_QuickJit, W("TC_QuickJit"), 1, "For methods that would be jitted, enable using quick JIT when appropriate.")
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 1, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
-#else // !(defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)) || defined(TARGET_RISCV64)
+#else // FEATURE_ON_STACK_REPLACEMENT
 RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_TC_QuickJitForLoops, W("TC_QuickJitForLoops"), 0, "When quick JIT is enabled, quick JIT may also be used for methods that contain loops.")
-#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#endif // FEATURE_ON_STACK_REPLACEMENT
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_AggressiveTiering, W("TC_AggressiveTiering"), 0, "Transition through tiers aggressively.")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountThreshold, W("TC_CallCountThreshold"), TC_CallCountThreshold, "Number of times a method must be called in tier 0 after which it is promoted to the next tier.")
 RETAIL_CONFIG_DWORD_INFO(EXTERNAL_TC_CallCountingDelayMs, W("TC_CallCountingDelayMs"), TC_CallCountingDelayMs, "A perpetual delay in milliseconds that is applied to call counting in tier 0 and jitting at higher tiers, while there is startup-like activity.")
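
The change here swaps a hard-coded list of 64-bit targets for the feature macro that actually gates on-stack replacement, so the TC_QuickJitForLoops default follows the feature rather than an architecture list. A minimal sketch of that pattern (not part of this diff; it assumes the build derives FEATURE_ON_STACK_REPLACEMENT from the same targets the old condition named):

    // Sketch only: derive the feature once, centrally, from the target macros,
    // so every use site can test the feature instead of repeating the list.
    #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
    #define FEATURE_ON_STACK_REPLACEMENT 1
    #endif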

src/coreclr/inc/gcinfotypes.h

Lines changed: 2 additions & 56 deletions

@@ -113,6 +113,8 @@ struct GcStackSlot
 }
 };

+// ReturnKind is not encoded in GCInfo v4 and later, except on x86.
+
 //--------------------------------------------------------------------------------
 // ReturnKind -- encoding return type information in GcInfo
 //
@@ -137,61 +139,6 @@ struct GcStackSlot
 //
 //--------------------------------------------------------------------------------

-// RT_Unset: An intermediate step for staged bringup.
-// When ReturnKind is RT_Unset, it means that the JIT did not set
-// the ReturnKind in the GCInfo, and therefore the VM cannot rely on it,
-// and must use other mechanisms (similar to GcInfo ver 1) to determine
-// the Return type's GC information.
-//
-// RT_Unset is only used in the following situations:
-// X64: Used by JIT64 until updated to use GcInfo v2 API
-// ARM: Used by JIT32 until updated to use GcInfo v2 API
-//
-// RT_Unset should have a valid encoding, whose bits are actually stored in the image.
-// For X86, there are no free bits, and there's no RT_Unused enumeration.
-
-#if defined(TARGET_X86)
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Float
-
-#elif defined(TARGET_ARM)
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Unset
-
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
-
-// Slim Header:
-
-// 00 RT_Scalar
-// 01 RT_Object
-// 10 RT_ByRef
-// 11 RT_Unset
-
-// Fat Header:
-
-// 0000 RT_Scalar
-// 0001 RT_Object
-// 0010 RT_ByRef
-// 0011 RT_Unset
-// 0100 RT_Scalar_Obj
-// 1000 RT_Scalar_ByRef
-// 0101 RT_Obj_Obj
-// 1001 RT_Obj_ByRef
-// 0110 RT_ByRef_Obj
-// 1010 RT_ByRef_ByRef
-
-#else
-#ifdef PORTABILITY_WARNING
-PORTABILITY_WARNING("Need ReturnKind for new Platform")
-#endif // PORTABILITY_WARNING
-#endif // Target checks
-
 enum ReturnKind {

 // Cases for Return in one register
@@ -1026,4 +973,3 @@ struct InterpreterGcInfoEncoding {
 #endif // debug_instrumented_return

 #endif // !__GCINFOTYPES_H__
-

src/coreclr/inc/switches.h

Lines changed: 1 addition & 1 deletion

@@ -46,7 +46,7 @@
 #if defined(TARGET_X86) || defined(TARGET_ARM) || defined(TARGET_BROWSER)
 #define USE_LAZY_PREFERRED_RANGE 0

-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_64BIT)

 #if defined(HOST_UNIX)
 // In PAL we have a smechanism that reserves memory on start up that is
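
This is the core simplification of the commit: a single bitness check replaces the list of 64-bit architectures. A standalone sketch of the assumption that makes the two forms equivalent (TARGET_64BIT is taken to be defined for every 64-bit target; the real definition lives in the build configuration, not in this diff):

    // Sketch only: TARGET_64BIT assumed to cover exactly the targets the old #elif listed.
    #if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || \
        defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
    #define TARGET_64BIT 1
    #endif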

src/coreclr/jit/codegencommon.cpp

Lines changed: 1 addition & 1 deletion

@@ -2107,7 +2107,7 @@ void CodeGen::genEmitMachineCode()

 bool trackedStackPtrsContig; // are tracked stk-ptrs contiguous ?

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 trackedStackPtrsContig = false;
 #elif defined(TARGET_ARM)
 // On arm due to prespilling of arguments, tracked stk-ptrs may not be contiguous

src/coreclr/jit/importercalls.cpp

Lines changed: 2 additions & 2 deletions

@@ -9121,7 +9121,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni
 return true;
 }

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 // Jit64 compat:
 if (callerRetType == TYP_VOID)
 {
@@ -9151,7 +9151,7 @@ bool Compiler::impTailCallRetTypeCompatible(bool allowWideni
 {
 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
 }
-#endif // TARGET_AMD64 || TARGET_ARM64 || TARGET_LOONGARCH64 || TARGET_RISCV64
+#endif // TARGET_64BIT

 return false;
 }

src/coreclr/jit/jitconfigvalues.h

Lines changed: 2 additions & 2 deletions

@@ -704,11 +704,11 @@ CONFIG_STRING(JitGuardedDevirtualizationRange, "JitGuardedDevirtualizationRange"
 CONFIG_INTEGER(JitRandomGuardedDevirtualization, "JitRandomGuardedDevirtualization", 0)

 // Enable insertion of patchpoints into Tier0 methods, switching to optimized where needed.
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
 RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 1)
 #else
 RELEASE_CONFIG_INTEGER(TC_OnStackReplacement, "TC_OnStackReplacement", 0)
-#endif // defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#endif // defined(FEATURE_ON_STACK_REPLACEMENT)

 // Initial patchpoint counter value used by jitted code
 RELEASE_CONFIG_INTEGER(TC_OnStackReplacement_InitialCounter, "TC_OnStackReplacement_InitialCounter", 1000)

src/coreclr/jit/lclvars.cpp

Lines changed: 1 addition & 1 deletion

@@ -6739,7 +6739,7 @@ int Compiler::lvaToCallerSPRelativeOffset(int offset, bool isFpBased, bool forRo
 offset += codeGen->genCallerSPtoInitialSPdelta();
 }

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#ifdef FEATURE_ON_STACK_REPLACEMENT
 if (forRootFrame && opts.IsOSR())
 {
 const PatchpointInfo* const ppInfo = info.compPatchpointInfo;

src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp

Lines changed: 0 additions & 9 deletions

@@ -1143,15 +1143,6 @@ int UnixNativeCodeManager::TrailingEpilogueInstructionsCount(MethodInfo * pMetho
 return 0;
 }

-// Convert the return kind that was encoded by RyuJIT to the
-// enum used by the runtime.
-GCRefKind GetGcRefKind(ReturnKind returnKind)
-{
-ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef));
-
-return (GCRefKind)returnKind;
-}
-
 bool UnixNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo * pMethodInfo,
 REGDISPLAY * pRegisterSet, // in
 PTR_PTR_VOID * ppvRetAddrLocation) // out

src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp

Lines changed: 1 addition & 14 deletions

@@ -821,19 +821,6 @@ bool CoffNativeCodeManager::IsUnwindable(PTR_VOID pvAddress)
 return true;
 }

-// Convert the return kind that was encoded by RyuJIT to the
-// enum used by the runtime.
-GCRefKind GetGcRefKind(ReturnKind returnKind)
-{
-#ifdef TARGET_ARM64
-ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef_ByRef));
-#else
-ASSERT((returnKind >= RT_Scalar) && (returnKind <= RT_ByRef));
-#endif
-
-return (GCRefKind)returnKind;
-}
-
 bool CoffNativeCodeManager::GetReturnAddressHijackInfo(MethodInfo * pMethodInfo,
 REGDISPLAY * pRegisterSet, // in
 PTR_PTR_VOID * ppvRetAddrLocation) // out
@@ -983,7 +970,7 @@ GCRefKind CoffNativeCodeManager::GetReturnValueKind(MethodInfo * pMethodInfo,
 hdrInfo infoBuf;
 size_t infoSize = DecodeGCHdrInfo(GCInfoToken(gcInfo), codeOffset, &infoBuf);

-return GetGcRefKind(infoBuf.returnKind);
+return (GCRefKind)infoBuf.returnKind;
 }
 #endif
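
In both native code managers the deleted GetGcRefKind helper was only a range-checked cast, so the remaining call site can cast directly. A standalone sketch of why that is safe (hypothetical enum values, not the runtime's real definitions; the point is that the two enumerations must share a numeric encoding):

    #include <cstdint>

    // Hypothetical stand-ins for the JIT-side and runtime-side enums.
    enum ReturnKindSketch : uint8_t { RKS_Scalar = 0, RKS_Object = 1, RKS_ByRef = 2 };
    enum GCRefKindSketch  : uint8_t { GKS_Scalar = 0, GKS_Object = 1, GKS_ByRef = 2 };

    // The removed helper reduced to this cast after asserting the value was in range.
    inline GCRefKindSketch ToGcRefKindSketch(ReturnKindSketch rk)
    {
        static_assert((int)RKS_Object == (int)GKS_Object && (int)RKS_ByRef == (int)GKS_ByRef,
                      "a direct cast relies on the two encodings staying in sync");
        return static_cast<GCRefKindSketch>(rk);
    }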

src/coreclr/pal/src/exception/remote-unwind.cpp

Lines changed: 1 addition & 1 deletion

@@ -114,7 +114,7 @@ typedef BOOL(*UnwindReadMemoryCallback)(PVOID address, PVOID buffer, SIZE_T size
 #define PRId PRId32
 #define PRIA "08"
 #define PRIxA PRIA PRIx
-#elif defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_S390X) || defined(TARGET_LOONGARCH64) || defined(TARGET_POWERPC64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_64BIT)
 #define PRIx PRIx64
 #define PRIu PRIu64
 #define PRId PRId64
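
A usage sketch of the idea behind these macros (standalone, hypothetical names): one format-specifier macro expands to the 32- or 64-bit <inttypes.h> specifier, so a single printf format string builds correctly for every target word size:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Select the specifier from the pointer width, mirroring the TARGET_* selection above.
    #if UINTPTR_MAX > 0xFFFFFFFFu
    #define SKETCH_PRIx PRIx64
    typedef uint64_t sketch_addr_t;
    #else
    #define SKETCH_PRIx PRIx32
    typedef uint32_t sketch_addr_t;
    #endif

    void PrintUnwindAddress(sketch_addr_t addr)
    {
        printf("address = 0x%" SKETCH_PRIx "\n", addr);
    }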

src/coreclr/vm/codeman.cpp

Lines changed: 4 additions & 4 deletions

@@ -2373,7 +2373,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize)
 {
 LIMITED_METHOD_CONTRACT;

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 //
 // Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce
 // chance that we won't be able allocate jump stub because of lack of suitable address space.
@@ -2425,7 +2425,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
 bool fAllocatedFromEmergencyJumpStubReserve = false;

 size_t allocationSize = pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(initialRequestSize);
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 if (!pInfo->IsInterpreted())
 {
 allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE);
@@ -2485,7 +2485,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
 // this first allocation is critical as it sets up correctly the loader heap info
 HeapList *pHp = new HeapList;

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 if (pInfo->IsInterpreted())
 {
 pHp->CLRPersonalityRoutine = NULL;
@@ -2655,7 +2655,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe

 size_t reserveSize = initialRequestSize;

-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 if (!pInfo->IsInterpreted())
 {
 reserveSize += JUMP_ALLOCATE_SIZE;

src/coreclr/vm/codeman.h

Lines changed: 2 additions & 2 deletions

@@ -534,13 +534,13 @@ struct HeapList
 size_t reserveForJumpStubs; // Amount of memory reserved for jump stubs in this block

 PTR_LoaderAllocator pLoaderAllocator; // LoaderAllocator of HeapList
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 BYTE* CLRPersonalityRoutine; // jump thunk to personality routine, NULL if there is no personality routine (e.g. interpreter code heap)
 #endif

 TADDR GetModuleBase()
 {
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)
 return (CLRPersonalityRoutine != NULL) ? (TADDR)CLRPersonalityRoutine : (TADDR)mapBase;
 #else
 return (TADDR)mapBase;
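
A simplified sketch of the behavior GetModuleBase() keeps on 64-bit targets (hypothetical field layout, not the real HeapList): when a per-heap personality-routine thunk exists, its address is reported as the module base; otherwise the mapped base is used:

    #include <cstdint>

    struct HeapListSketch
    {
        uint8_t*  CLRPersonalityRoutine; // nullptr when the heap carries no thunk (e.g. interpreter code)
        uintptr_t mapBase;               // base address of the mapped code region

        uintptr_t GetModuleBase() const
        {
            return (CLRPersonalityRoutine != nullptr)
                       ? reinterpret_cast<uintptr_t>(CLRPersonalityRoutine)
                       : mapBase;
        }
    };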

src/coreclr/vm/dynamicmethod.cpp

Lines changed: 1 addition & 1 deletion

@@ -448,7 +448,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo)
 else
 #endif // FEATURE_INTERPRETER
 {
-#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_64BIT)

 pTracker = AllocMemory_NoThrow(0, JUMP_ALLOCATE_SIZE, sizeof(void*), 0);
 if (pTracker == NULL)
