From 8212e067745ae7ecd767799cf35fdcca0a9b4c6b Mon Sep 17 00:00:00 2001
From: Alex Bradbury
Date: Wed, 24 Sep 2025 15:18:42 +0100
Subject: [PATCH 1/5] [RISCV] Introduce pass to promote double constants to a
 global array

As discussed in #153402, we have inefficiencies in handling constant
pool accesses that are difficult to address. Using an IR pass to
promote double constants to a global allows a higher degree of control
over code generation for these accesses, resulting in improved
performance on benchmarks that might otherwise have high register
pressure due to accessing constant pool values separately rather than
via a common base.

Directly promoting double constants to separate global values and
relying on the global merger to do a sensible thing would be one
potential avenue to explore, but it is _not_ done in this version of
the patch because:
* The global merger pass needs fixes. For instance, it claims to be a
  function pass, yet all of the work is done in initialisation. This
  means that attempts by backends to schedule it after a given module
  pass don't actually work as expected.
* The heuristics used can impact codegen unexpectedly, so I worry that
  tweaking it to get the behaviour desired for promoted constants may
  lead to other issues. This may be completely tractable though.

Now that #159352 has landed, the impact in terms of dynamically
executed instructions is slightly smaller (as we are starting from a
better baseline), but still worthwhile in lbm and nab from SPEC.
Results below are for rva22u64:

```
Benchmark            Baseline       This PR   Diff (%)
============================================================
500.perlbench_r  180667466583  180667466661      0.00%
502.gcc_r        221281439537  221277561043     -0.00%
505.mcf_r        134656203905  134656204017      0.00%
508.namd_r       217646645213  217616374477     -0.01%
510.parest_r     291730242760  291917069933      0.06%
511.povray_r      30982459833   31101871667      0.39%
519.lbm_r         91217999812   89029313608     -2.40%
520.omnetpp_r    137705551722  138044390554      0.25%
523.xalancbmk_r  284733326286  284728940808     -0.00%
525.x264_r       379107521545  379100249676     -0.00%
526.blender_r    659391437704  659446918261      0.01%
531.deepsjeng_r  350038121655  350038121654     -0.00%
538.imagick_r    238568679271  238560769465     -0.00%
541.leela_r      405654701351  405660852862      0.00%
544.nab_r        398215801713  391380811065     -1.72%
557.xz_r         129832192046  129832192047      0.00%
```
---
 llvm/lib/Target/RISCV/CMakeLists.txt          |    1 +
 llvm/lib/Target/RISCV/RISCV.h                 |    3 +
 .../lib/Target/RISCV/RISCVPromoteConstant.cpp |  208 +++
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |    2 +
 .../RISCV/GlobalISel/irtranslator/vararg.ll   |   32 +-
 llvm/test/CodeGen/RISCV/O3-pipeline.ll        |    1 +
 llvm/test/CodeGen/RISCV/double-imm.ll         |    8 +-
 llvm/test/CodeGen/RISCV/double-select-fcmp.ll |   37 +-
 llvm/test/CodeGen/RISCV/double-zfa.ll         |   32 +-
 llvm/test/CodeGen/RISCV/vararg.ll             | 1144 +++++++++++++----
 .../CodeGen/RISCV/zdinx-boundary-check.ll     |    4 +-
 11 files changed, 1158 insertions(+), 314 deletions(-)
 create mode 100644 llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp

diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 0ff178e1f1959..8702b9e63f867 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -59,6 +59,7 @@ add_llvm_target(RISCVCodeGen
   RISCVOptWInstrs.cpp
   RISCVPostRAExpandPseudoInsts.cpp
   RISCVPushPopOptimizer.cpp
+  RISCVPromoteConstant.cpp
   RISCVRedundantCopyElimination.cpp
   RISCVRegisterInfo.cpp
   RISCVSelectionDAGInfo.cpp
diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index ae9410193efe1..5b0ce521409ad 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -19,6 +19,7 @@
 namespace llvm {
 class FunctionPass;
+class ModulePass;
 class InstructionSelector;
 class PassRegistry;
 class RISCVRegisterBankInfo;
@@ -111,6 +112,8 @@ void initializeRISCVO0PreLegalizerCombinerPass(PassRegistry &);
 FunctionPass *createRISCVPreLegalizerCombiner();
 void initializeRISCVPreLegalizerCombinerPass(PassRegistry &);
 
+ModulePass *createRISCVPromoteConstantPass();
+
 FunctionPass *createRISCVVLOptimizerPass();
 void initializeRISCVVLOptimizerPass(PassRegistry &);
 
diff --git a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
new file mode 100644
index 0000000000000..545ab47857e9c
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
@@ -0,0 +1,208 @@
+//==- RISCVPromoteConstant.cpp - Promote constant fp to global for RISC-V --==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-promote-const"
+
+STATISTIC(NumPromoted, "Number of promoted constants");
+STATISTIC(NumPromotedUses, "Number of promoted constant uses");
+
+namespace {
+
+class RISCVPromoteConstant : public ModulePass {
+public:
+  static char ID;
+  RISCVPromoteConstant() : ModulePass(ID) {}
+
+  StringRef getPassName() const override { return "RISC-V Promote Constant"; }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<TargetPassConfig>();
+    AU.setPreservesCFG();
+  }
+
+  /// Iterate over the functions and promote the double fp constants that
+  /// would otherwise go into the constant pool to a constant array.
+  bool runOnModule(Module &M) override {
+    LLVM_DEBUG(dbgs() << getPassName() << '\n');
+    // TargetMachine and Subtarget are needed to query isFPImmLegal. Get them
+    // from TargetPassConfig.
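+    // (TargetPassConfig was declared as a required analysis in
+    // getAnalysisUsage above, which is what makes getAnalysis usable from
+    // this module pass.)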
+    const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
+    const TargetMachine &TM = TPC.getTM<TargetMachine>();
+    if (skipModule(M))
+      return false;
+    bool Changed = false;
+    for (Function &F : M) {
+      const RISCVSubtarget &ST = TM.getSubtarget<RISCVSubtarget>(F);
+      const RISCVTargetLowering *TLI = ST.getTargetLowering();
+      Changed |= runOnFunction(F, TLI);
+    }
+    return Changed;
+  }
+
+private:
+  bool runOnFunction(Function &F, const RISCVTargetLowering *TLI);
+};
+} // end anonymous namespace
+
+char RISCVPromoteConstant::ID = 0;
+
+ModulePass *llvm::createRISCVPromoteConstantPass() {
+  return new RISCVPromoteConstant();
+}
+
+bool RISCVPromoteConstant::runOnFunction(Function &F,
+                                         const RISCVTargetLowering *TLI) {
+  // Bail out and make no transformation if the target doesn't support
+  // doubles, or if we're not targeting RV64 as we currently see some
+  // regressions for those targets.
+  if (!TLI->isTypeLegal(MVT::f64) || !TLI->isTypeLegal(MVT::i64))
+    return false;
+
+  // Collect all unique double constants used in the function, and track their
+  // offset within the newly created global array. Also track uses that will
+  // be replaced later.
+  DenseMap<ConstantFP *, unsigned> ConstantMap;
+  SmallVector<Constant *> ConstantVector;
+  DenseMap<ConstantFP *, SmallVector<Use *>> UsesInFunc;
+
+  for (Instruction &I : instructions(F)) {
+    // PHI nodes are handled specially in a second loop below.
+    if (isa<PHINode>(I))
+      continue;
+    for (Use &U : I.operands()) {
+      if (auto *C = dyn_cast<ConstantFP>(U.get())) {
+        if (C->getType()->isDoubleTy()) {
+          if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64,
+                                /*ForCodeSize=*/false))
+            continue;
+          UsesInFunc[C].push_back(&U);
+          if (ConstantMap.find(C) == ConstantMap.end()) {
+            ConstantMap[C] = ConstantVector.size();
+            ConstantVector.push_back(C);
+            ++NumPromoted;
+          }
+        }
+      }
+    }
+  }
+
+  // Collect uses from PHI nodes after other uses, because when transforming
+  // the function, we handle PHI uses afterwards.
+  for (BasicBlock &BB : F) {
+    for (PHINode &PN : BB.phis()) {
+      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
+        if (auto *C = dyn_cast<ConstantFP>(PN.getIncomingValue(i))) {
+          if (C->getType()->isDoubleTy()) {
+            if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64,
+                                  /*ForCodeSize=*/false))
+              continue;
+            UsesInFunc[C].push_back(&PN.getOperandUse(i));
+            if (ConstantMap.find(C) == ConstantMap.end()) {
+              ConstantMap[C] = ConstantVector.size();
+              ConstantVector.push_back(C);
+              ++NumPromoted;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  // Bail out if no promotable constants were found.
+  if (ConstantVector.empty())
+    return false;
+
+  // Create a global array containing the promoted constants.
+  Module *M = F.getParent();
+  Type *DoubleTy = Type::getDoubleTy(M->getContext());
+  ArrayType *ArrayTy = ArrayType::get(DoubleTy, ConstantVector.size());
+  Constant *GlobalArrayInitializer =
+      ConstantArray::get(ArrayTy, ConstantVector);
+
+  auto *GlobalArray = new GlobalVariable(
+      *M, ArrayTy,
+      /*isConstant=*/true, GlobalValue::InternalLinkage, GlobalArrayInitializer,
+      ".promoted_doubles." + F.getName());
+
+  // Create a GEP for the base pointer in the function entry.
+  IRBuilder<> EntryBuilder(&F.getEntryBlock().front());
+  Value *BasePtr = EntryBuilder.CreateConstInBoundsGEP2_64(
+      GlobalArray->getValueType(), GlobalArray, 0, 0, "doubles.base");
+
+  // A cache to hold the loaded value for a given constant within a basic
+  // block.
+  DenseMap<std::pair<Constant *, BasicBlock *>, Value *> LocalLoads;
+
+  // Replace all uses with the loaded value.
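+  // Loads are materialized lazily and at most once per basic block: a use is
+  // rewritten to a load inserted immediately before the using instruction,
+  // except for PHI operands, where the load is placed at the end of the
+  // corresponding incoming block so that it dominates the edge. LocalLoads
+  // lets blocks that use the same constant several times share a single load.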
+  for (Constant *ConstVal : ConstantVector) {
+    auto *Const = cast<ConstantFP>(ConstVal);
+    const auto &Uses = UsesInFunc.at(Const);
+    unsigned Idx = ConstantMap.at(Const);
+
+    for (Use *U : Uses) {
+      Instruction *UserInst = cast<Instruction>(U->getUser());
+      BasicBlock *InsertionBB;
+      Instruction *InsertionPt;
+
+      if (auto *PN = dyn_cast<PHINode>(UserInst)) {
+        // If the user is a PHI node, we must insert the load in the
+        // corresponding predecessor basic block, before its terminator.
+        unsigned OperandIdx = U->getOperandNo();
+        InsertionBB = PN->getIncomingBlock(OperandIdx);
+        InsertionPt = InsertionBB->getTerminator();
+      } else {
+        // For any other instruction, we can insert the load right before it.
+        InsertionBB = UserInst->getParent();
+        InsertionPt = UserInst;
+      }
+
+      auto CacheKey = std::make_pair(Const, InsertionBB);
+      Value *LoadedVal = nullptr;
+
+      // Reuse a load if one already exists in the insertion block.
+      if (LocalLoads.count(CacheKey)) {
+        LoadedVal = LocalLoads.at(CacheKey);
+      } else {
+        // Otherwise, create a new GEP and load at the correct insertion point.
+        IRBuilder<> Builder(InsertionPt);
+        Value *ElementPtr = Builder.CreateConstInBoundsGEP1_64(
+            DoubleTy, BasePtr, Idx, "double.addr");
+        LoadedVal = Builder.CreateLoad(DoubleTy, ElementPtr, "double.val");
+
+        // Cache the newly created load for this block.
+        LocalLoads[CacheKey] = LoadedVal;
+      }
+
+      U->set(LoadedVal);
+      ++NumPromotedUses;
+    }
+  }
+
+  return true;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index f81b1e1260ee3..1c0f4d3ad041b 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -463,6 +463,8 @@ void RISCVPassConfig::addIRPasses() {
 }
 
 bool RISCVPassConfig::addPreISel() {
+  if (TM->getOptLevel() != CodeGenOptLevel::None)
+    addPass(createRISCVPromoteConstantPass());
   if (TM->getOptLevel() != CodeGenOptLevel::None) {
     // Add a barrier before instruction selection so that we will not get
     // deleted block address after enabling default outlining. See D99707 for
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
index 74961d12c1c85..7233ce6d593d0 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
@@ -67,8 +67,8 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV32-NEXT:    G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
 ; RV32-NEXT:    [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va)
 ; RV32-NEXT:    [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-; RV32-NEXT:    %20:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
-; RV32-NEXT:    G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
+; RV32-NEXT:    [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
+; RV32-NEXT:    G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
 ; RV32-NEXT:    [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
 ; RV32-NEXT:    $x10 = COPY [[LOAD1]](s32)
 ; RV32-NEXT:    PseudoRET implicit $x10
@@ -105,8 +105,8 @@ define i32 @va1(ptr %fmt, ...)
{ ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va) ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va, align 4) ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; RV64-NEXT: %20:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64) - ; RV64-NEXT: G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4) + ; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64) + ; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4) ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur) ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32) ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64) @@ -687,8 +687,8 @@ define i64 @va2(ptr %fmt, ...) nounwind { ; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]] ; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32) ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 - ; RV32-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32) - ; RV32-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va) + ; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32) + ; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va) ; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32) ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3) ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64) @@ -733,8 +733,8 @@ define i64 @va2(ptr %fmt, ...) nounwind { ; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]] ; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32) ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; RV64-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64) - ; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4) + ; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64) + ; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4) ; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32) ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3) ; RV64-NEXT: $x10 = COPY [[LOAD1]](s64) @@ -974,8 +974,8 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind { ; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]] ; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32) ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 - ; RV32-NEXT: %24:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32) - ; RV32-NEXT: G_STORE %24(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va) + ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32) + ; RV32-NEXT: G_STORE [[PTR_ADD5]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va) ; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32) ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3) ; RV32-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[LOAD1]] @@ -1020,8 +1020,8 @@ define i64 @va3(i32 %a, i64 %b, ...) 
nounwind { ; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]] ; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32) ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 - ; RV64-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64) - ; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4) + ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64) + ; RV64-NEXT: G_STORE [[PTR_ADD6]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4) ; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32) ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3) ; RV64-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[LOAD1]] @@ -1724,8 +1724,8 @@ define i32 @va_large_stack(ptr %fmt, ...) { ; RV32-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s32) into %ir.va) ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va) ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 - ; RV32-NEXT: %21:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32) - ; RV32-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va) + ; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32) + ; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va) ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur) ; RV32-NEXT: $x10 = COPY [[LOAD1]](s32) ; RV32-NEXT: PseudoRET implicit $x10 @@ -1763,8 +1763,8 @@ define i32 @va_large_stack(ptr %fmt, ...) { ; RV64-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s64) into %ir.va) ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va, align 4) ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 - ; RV64-NEXT: %21:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64) - ; RV64-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va, align 4) + ; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64) + ; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va, align 4) ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur) ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32) ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64) diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll index ea08061221fd4..a982de8301e2c 100644 --- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll +++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll @@ -75,6 +75,7 @@ ; CHECK-NEXT: CodeGen Prepare ; CHECK-NEXT: Dominator Tree Construction ; CHECK-NEXT: Exception handling preparation +; CHECK-NEXT: RISC-V Promote Constant ; CHECK-NEXT: A No-Op Barrier Pass ; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Merge internal globals diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll index 6f7c30edba3ea..3d1b0d8cc9658 100644 --- a/llvm/test/CodeGen/RISCV/double-imm.ll +++ b/llvm/test/CodeGen/RISCV/double-imm.ll @@ -17,8 +17,8 @@ define double @double_imm() nounwind { ; ; CHECK64D-LABEL: double_imm: ; CHECK64D: # %bb.0: -; CHECK64D-NEXT: lui a0, %hi(.LCPI0_0) -; CHECK64D-NEXT: fld fa0, %lo(.LCPI0_0)(a0) +; CHECK64D-NEXT: lui a0, %hi(.promoted_doubles.double_imm) +; CHECK64D-NEXT: fld fa0, %lo(.promoted_doubles.double_imm)(a0) ; CHECK64D-NEXT: ret ; ; 
CHECKRV32ZDINX-LABEL: double_imm: @@ -31,8 +31,8 @@ define double @double_imm() nounwind { ; ; CHECKRV64ZDINX-LABEL: double_imm: ; CHECKRV64ZDINX: # %bb.0: -; CHECKRV64ZDINX-NEXT: lui a0, %hi(.LCPI0_0) -; CHECKRV64ZDINX-NEXT: ld a0, %lo(.LCPI0_0)(a0) +; CHECKRV64ZDINX-NEXT: lui a0, %hi(.promoted_doubles.double_imm) +; CHECKRV64ZDINX-NEXT: ld a0, %lo(.promoted_doubles.double_imm)(a0) ; CHECKRV64ZDINX-NEXT: ret ret double 3.1415926535897931159979634685441851615905761718750 } diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll index 1deea55b083ce..0d4a69b69fb8c 100644 --- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ -; RUN: -target-abi=ilp32d | FileCheck %s +; RUN: -target-abi=ilp32d | FileCheck -check-prefixes=CHECK,CHECKRV32D %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ -; RUN: -target-abi=lp64d | FileCheck %s +; RUN: -target-abi=lp64d | FileCheck -check-prefixes=CHECK,CHECKRV64D %s ; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=ilp32 | FileCheck --check-prefix=CHECKRV32ZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ @@ -640,6 +640,39 @@ define signext i32 @select_fcmp_uge_1_2(double %a, double %b) nounwind { } define double @CascadedSelect(double noundef %a) { +; CHECKRV32D-LABEL: CascadedSelect: +; CHECKRV32D: # %bb.0: # %entry +; CHECKRV32D-NEXT: lui a0, %hi(.LCPI20_0) +; CHECKRV32D-NEXT: fld fa5, %lo(.LCPI20_0)(a0) +; CHECKRV32D-NEXT: flt.d a0, fa5, fa0 +; CHECKRV32D-NEXT: bnez a0, .LBB20_3 +; CHECKRV32D-NEXT: # %bb.1: # %entry +; CHECKRV32D-NEXT: fcvt.d.w fa5, zero +; CHECKRV32D-NEXT: flt.d a0, fa0, fa5 +; CHECKRV32D-NEXT: bnez a0, .LBB20_3 +; CHECKRV32D-NEXT: # %bb.2: # %entry +; CHECKRV32D-NEXT: fmv.d fa5, fa0 +; CHECKRV32D-NEXT: .LBB20_3: # %entry +; CHECKRV32D-NEXT: fmv.d fa0, fa5 +; CHECKRV32D-NEXT: ret +; +; CHECKRV64D-LABEL: CascadedSelect: +; CHECKRV64D: # %bb.0: # %entry +; CHECKRV64D-NEXT: li a0, 1023 +; CHECKRV64D-NEXT: slli a0, a0, 52 +; CHECKRV64D-NEXT: fmv.d.x fa5, a0 +; CHECKRV64D-NEXT: flt.d a0, fa5, fa0 +; CHECKRV64D-NEXT: bnez a0, .LBB20_3 +; CHECKRV64D-NEXT: # %bb.1: # %entry +; CHECKRV64D-NEXT: fmv.d.x fa5, zero +; CHECKRV64D-NEXT: flt.d a0, fa0, fa5 +; CHECKRV64D-NEXT: bnez a0, .LBB20_3 +; CHECKRV64D-NEXT: # %bb.2: # %entry +; CHECKRV64D-NEXT: fmv.d fa5, fa0 +; CHECKRV64D-NEXT: .LBB20_3: # %entry +; CHECKRV64D-NEXT: fmv.d fa0, fa5 +; CHECKRV64D-NEXT: ret +; ; CHECKRV32ZDINX-LABEL: CascadedSelect: ; CHECKRV32ZDINX: # %bb.0: # %entry ; CHECKRV32ZDINX-NEXT: lui a3, %hi(.LCPI20_0) diff --git a/llvm/test/CodeGen/RISCV/double-zfa.ll b/llvm/test/CodeGen/RISCV/double-zfa.ll index f17c63ddb6cae..89f8a8dddb50c 100644 --- a/llvm/test/CodeGen/RISCV/double-zfa.ll +++ b/llvm/test/CodeGen/RISCV/double-zfa.ll @@ -103,21 +103,33 @@ define double @loadfpimm10() { ; Negative test. This is a qnan with payload of 1. 
define double @loadfpimm11() { -; CHECK-LABEL: loadfpimm11: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI10_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI10_0)(a0) -; CHECK-NEXT: ret +; RV32IDZFA-LABEL: loadfpimm11: +; RV32IDZFA: # %bb.0: +; RV32IDZFA-NEXT: lui a0, %hi(.LCPI10_0) +; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI10_0)(a0) +; RV32IDZFA-NEXT: ret +; +; RV64DZFA-LABEL: loadfpimm11: +; RV64DZFA: # %bb.0: +; RV64DZFA-NEXT: lui a0, %hi(.promoted_doubles.loadfpimm11) +; RV64DZFA-NEXT: fld fa0, %lo(.promoted_doubles.loadfpimm11)(a0) +; RV64DZFA-NEXT: ret ret double 0x7ff8000000000001 } ; Negative test. This is an snan with payload of 1. define double @loadfpimm12() { -; CHECK-LABEL: loadfpimm12: -; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI11_0) -; CHECK-NEXT: fld fa0, %lo(.LCPI11_0)(a0) -; CHECK-NEXT: ret +; RV32IDZFA-LABEL: loadfpimm12: +; RV32IDZFA: # %bb.0: +; RV32IDZFA-NEXT: lui a0, %hi(.LCPI11_0) +; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI11_0)(a0) +; RV32IDZFA-NEXT: ret +; +; RV64DZFA-LABEL: loadfpimm12: +; RV64DZFA: # %bb.0: +; RV64DZFA-NEXT: lui a0, %hi(.promoted_doubles.loadfpimm12) +; RV64DZFA-NEXT: fld fa0, %lo(.promoted_doubles.loadfpimm12)(a0) +; RV64DZFA-NEXT: ret ret double 0x7ff0000000000001 } diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll index 3dd99f3d49d2d..c6d35172f26d2 100644 --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -18,13 +18,13 @@ ; RUN: -verify-machineinstrs \ ; RUN: | FileCheck -check-prefix=ILP32E-WITHFP %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs \ -; RUN: | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s +; RUN: | FileCheck -check-prefixes=RV64,LP64-LP64F-LP64D-FPELIM %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64f \ ; RUN: -verify-machineinstrs \ -; RUN: | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s +; RUN: | FileCheck -check-prefixes=RV64D-LP64F,LP64-LP64F-LP64D-FPELIM %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \ ; RUN: -verify-machineinstrs \ -; RUN: | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s +; RUN: | FileCheck -check-prefixes=RV64D-LP64D,LP64-LP64F-LP64D-FPELIM %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs -frame-pointer=all \ ; RUN: | FileCheck -check-prefix=LP64-LP64F-LP64D-WITHFP %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -target-abi lp64e \ @@ -158,23 +158,59 @@ define i32 @va1(ptr %fmt, ...) 
{ ; ILP32E-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va1: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 -; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 80 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 28 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 -; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va1: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: .cfi_def_cfa_offset 80 +; RV64-NEXT: sd a1, 24(sp) +; RV64-NEXT: addi a0, sp, 28 +; RV64-NEXT: sd a0, 8(sp) +; RV64-NEXT: lw a0, 24(sp) +; RV64-NEXT: sd a5, 56(sp) +; RV64-NEXT: sd a6, 64(sp) +; RV64-NEXT: sd a7, 72(sp) +; RV64-NEXT: sd a2, 32(sp) +; RV64-NEXT: sd a3, 40(sp) +; RV64-NEXT: sd a4, 48(sp) +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: .cfi_def_cfa_offset 0 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va1: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -80 +; RV64D-LP64F-NEXT: .cfi_def_cfa_offset 80 +; RV64D-LP64F-NEXT: sd a1, 24(sp) +; RV64D-LP64F-NEXT: addi a0, sp, 28 +; RV64D-LP64F-NEXT: sd a0, 8(sp) +; RV64D-LP64F-NEXT: lw a0, 24(sp) +; RV64D-LP64F-NEXT: sd a5, 56(sp) +; RV64D-LP64F-NEXT: sd a6, 64(sp) +; RV64D-LP64F-NEXT: sd a7, 72(sp) +; RV64D-LP64F-NEXT: sd a2, 32(sp) +; RV64D-LP64F-NEXT: sd a3, 40(sp) +; RV64D-LP64F-NEXT: sd a4, 48(sp) +; RV64D-LP64F-NEXT: addi sp, sp, 80 +; RV64D-LP64F-NEXT: .cfi_def_cfa_offset 0 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va1: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -80 +; RV64D-LP64D-NEXT: .cfi_def_cfa_offset 80 +; RV64D-LP64D-NEXT: sd a1, 24(sp) +; RV64D-LP64D-NEXT: addi a0, sp, 28 +; RV64D-LP64D-NEXT: sd a0, 8(sp) +; RV64D-LP64D-NEXT: lw a0, 24(sp) +; RV64D-LP64D-NEXT: sd a5, 56(sp) +; RV64D-LP64D-NEXT: sd a6, 64(sp) +; RV64D-LP64D-NEXT: sd a7, 72(sp) +; RV64D-LP64D-NEXT: sd a2, 32(sp) +; RV64D-LP64D-NEXT: sd a3, 40(sp) +; RV64D-LP64D-NEXT: sd a4, 48(sp) +; RV64D-LP64D-NEXT: addi sp, sp, 80 +; RV64D-LP64D-NEXT: .cfi_def_cfa_offset 0 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -344,21 +380,53 @@ define i32 @va1_va_arg(ptr %fmt, ...) 
nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 -; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 32 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va1_va_arg: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: sd a5, 56(sp) +; RV64-NEXT: sd a6, 64(sp) +; RV64-NEXT: sd a7, 72(sp) +; RV64-NEXT: sd a1, 24(sp) +; RV64-NEXT: sd a2, 32(sp) +; RV64-NEXT: sd a3, 40(sp) +; RV64-NEXT: sd a4, 48(sp) +; RV64-NEXT: addi a1, sp, 32 +; RV64-NEXT: sd a1, 8(sp) +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va1_va_arg: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -80 +; RV64D-LP64F-NEXT: mv a0, a1 +; RV64D-LP64F-NEXT: sd a5, 56(sp) +; RV64D-LP64F-NEXT: sd a6, 64(sp) +; RV64D-LP64F-NEXT: sd a7, 72(sp) +; RV64D-LP64F-NEXT: sd a1, 24(sp) +; RV64D-LP64F-NEXT: sd a2, 32(sp) +; RV64D-LP64F-NEXT: sd a3, 40(sp) +; RV64D-LP64F-NEXT: sd a4, 48(sp) +; RV64D-LP64F-NEXT: addi a1, sp, 32 +; RV64D-LP64F-NEXT: sd a1, 8(sp) +; RV64D-LP64F-NEXT: addi sp, sp, 80 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va1_va_arg: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -80 +; RV64D-LP64D-NEXT: mv a0, a1 +; RV64D-LP64D-NEXT: sd a5, 56(sp) +; RV64D-LP64D-NEXT: sd a6, 64(sp) +; RV64D-LP64D-NEXT: sd a7, 72(sp) +; RV64D-LP64D-NEXT: sd a1, 24(sp) +; RV64D-LP64D-NEXT: sd a2, 32(sp) +; RV64D-LP64D-NEXT: sd a3, 40(sp) +; RV64D-LP64D-NEXT: sd a4, 48(sp) +; RV64D-LP64D-NEXT: addi a1, sp, 32 +; RV64D-LP64D-NEXT: sd a1, 8(sp) +; RV64D-LP64D-NEXT: addi sp, sp, 80 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -569,37 +637,101 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) 
nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 40 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg_alloca: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -96 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: addi s0, sp, 32 -; LP64-LP64F-LP64D-FPELIM-NEXT: mv s1, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, s0, 16 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, -32(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a1, 32 -; LP64-LP64F-LP64D-FPELIM-NEXT: srli a0, a0, 32 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 15 -; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -16 -; LP64-LP64F-LP64D-FPELIM-NEXT: sub a0, sp, a0 -; LP64-LP64F-LP64D-FPELIM-NEXT: mv sp, a0 -; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead -; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, s1 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, s0, -32 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: ld s1, 8(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 96 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va1_va_arg_alloca: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -96 +; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64-NEXT: addi s0, sp, 32 +; RV64-NEXT: mv s1, a1 +; RV64-NEXT: sd a5, 40(s0) +; RV64-NEXT: sd a6, 48(s0) +; RV64-NEXT: sd a7, 56(s0) +; RV64-NEXT: sd a1, 8(s0) +; RV64-NEXT: sd a2, 16(s0) +; RV64-NEXT: sd a3, 24(s0) +; RV64-NEXT: sd a4, 32(s0) +; RV64-NEXT: addi a0, s0, 16 +; RV64-NEXT: sd a0, -32(s0) +; RV64-NEXT: slli a0, a1, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: addi a0, a0, 15 +; RV64-NEXT: andi a0, a0, -16 +; RV64-NEXT: sub a0, sp, a0 +; RV64-NEXT: mv sp, a0 +; RV64-NEXT: call notdead +; RV64-NEXT: mv a0, s1 +; RV64-NEXT: addi sp, s0, -32 +; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 96 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va1_va_arg_alloca: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -96 +; RV64D-LP64F-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: addi s0, sp, 32 +; RV64D-LP64F-NEXT: mv s1, a1 +; RV64D-LP64F-NEXT: sd a5, 40(s0) +; RV64D-LP64F-NEXT: sd a6, 48(s0) +; RV64D-LP64F-NEXT: sd a7, 56(s0) +; RV64D-LP64F-NEXT: sd a1, 8(s0) +; RV64D-LP64F-NEXT: sd a2, 16(s0) +; RV64D-LP64F-NEXT: sd a3, 24(s0) +; RV64D-LP64F-NEXT: sd a4, 32(s0) +; RV64D-LP64F-NEXT: addi a0, s0, 16 +; RV64D-LP64F-NEXT: sd a0, -32(s0) +; RV64D-LP64F-NEXT: slli a0, a1, 32 +; RV64D-LP64F-NEXT: srli a0, a0, 32 +; RV64D-LP64F-NEXT: addi a0, a0, 15 +; RV64D-LP64F-NEXT: andi a0, a0, -16 +; RV64D-LP64F-NEXT: sub a0, sp, a0 +; RV64D-LP64F-NEXT: mv sp, a0 +; 
RV64D-LP64F-NEXT: call notdead +; RV64D-LP64F-NEXT: mv a0, s1 +; RV64D-LP64F-NEXT: addi sp, s0, -32 +; RV64D-LP64F-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: addi sp, sp, 96 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va1_va_arg_alloca: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -96 +; RV64D-LP64D-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: addi s0, sp, 32 +; RV64D-LP64D-NEXT: mv s1, a1 +; RV64D-LP64D-NEXT: sd a5, 40(s0) +; RV64D-LP64D-NEXT: sd a6, 48(s0) +; RV64D-LP64D-NEXT: sd a7, 56(s0) +; RV64D-LP64D-NEXT: sd a1, 8(s0) +; RV64D-LP64D-NEXT: sd a2, 16(s0) +; RV64D-LP64D-NEXT: sd a3, 24(s0) +; RV64D-LP64D-NEXT: sd a4, 32(s0) +; RV64D-LP64D-NEXT: addi a0, s0, 16 +; RV64D-LP64D-NEXT: sd a0, -32(s0) +; RV64D-LP64D-NEXT: slli a0, a1, 32 +; RV64D-LP64D-NEXT: srli a0, a0, 32 +; RV64D-LP64D-NEXT: addi a0, a0, 15 +; RV64D-LP64D-NEXT: andi a0, a0, -16 +; RV64D-LP64D-NEXT: sub a0, sp, a0 +; RV64D-LP64D-NEXT: mv sp, a0 +; RV64D-LP64D-NEXT: call notdead +; RV64D-LP64D-NEXT: mv a0, s1 +; RV64D-LP64D-NEXT: addi sp, s0, -32 +; RV64D-LP64D-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: addi sp, sp, 96 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg_alloca: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -769,17 +901,41 @@ define void @va1_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 8 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va1_caller: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 2 -; LP64-LP64F-LP64D-FPELIM-NEXT: call va1 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va1_caller: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64-NEXT: li a1, 1023 +; RV64-NEXT: slli a1, a1, 52 +; RV64-NEXT: li a2, 2 +; RV64-NEXT: call va1 +; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va1_caller: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -16 +; RV64D-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: li a1, 1023 +; RV64D-LP64F-NEXT: slli a1, a1, 52 +; RV64D-LP64F-NEXT: li a2, 2 +; RV64D-LP64F-NEXT: call va1 +; RV64D-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: addi sp, sp, 16 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va1_caller: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -16 +; RV64D-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: li a1, 1023 +; RV64D-LP64D-NEXT: slli a1, a1, 52 +; RV64D-LP64D-NEXT: li a2, 2 +; RV64D-LP64D-NEXT: call va1 +; RV64D-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: addi sp, sp, 16 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -941,21 +1097,53 @@ define i64 
@va2(ptr %fmt, ...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va2: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 -; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 39 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va2: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: sd a5, 56(sp) +; RV64-NEXT: sd a6, 64(sp) +; RV64-NEXT: sd a7, 72(sp) +; RV64-NEXT: sd a1, 24(sp) +; RV64-NEXT: sd a2, 32(sp) +; RV64-NEXT: sd a3, 40(sp) +; RV64-NEXT: sd a4, 48(sp) +; RV64-NEXT: addi a1, sp, 39 +; RV64-NEXT: sd a1, 8(sp) +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va2: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -80 +; RV64D-LP64F-NEXT: mv a0, a1 +; RV64D-LP64F-NEXT: sd a5, 56(sp) +; RV64D-LP64F-NEXT: sd a6, 64(sp) +; RV64D-LP64F-NEXT: sd a7, 72(sp) +; RV64D-LP64F-NEXT: sd a1, 24(sp) +; RV64D-LP64F-NEXT: sd a2, 32(sp) +; RV64D-LP64F-NEXT: sd a3, 40(sp) +; RV64D-LP64F-NEXT: sd a4, 48(sp) +; RV64D-LP64F-NEXT: addi a1, sp, 39 +; RV64D-LP64F-NEXT: sd a1, 8(sp) +; RV64D-LP64F-NEXT: addi sp, sp, 80 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va2: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -80 +; RV64D-LP64D-NEXT: mv a0, a1 +; RV64D-LP64D-NEXT: sd a5, 56(sp) +; RV64D-LP64D-NEXT: sd a6, 64(sp) +; RV64D-LP64D-NEXT: sd a7, 72(sp) +; RV64D-LP64D-NEXT: sd a1, 24(sp) +; RV64D-LP64D-NEXT: sd a2, 32(sp) +; RV64D-LP64D-NEXT: sd a3, 40(sp) +; RV64D-LP64D-NEXT: sd a4, 48(sp) +; RV64D-LP64D-NEXT: addi a1, sp, 39 +; RV64D-LP64D-NEXT: sd a1, 8(sp) +; RV64D-LP64D-NEXT: addi sp, sp, 80 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va2: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1138,21 +1326,53 @@ define i64 @va2_va_arg(ptr %fmt, ...) 
nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va2_va_arg: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 -; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 32 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va2_va_arg: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: sd a5, 56(sp) +; RV64-NEXT: sd a6, 64(sp) +; RV64-NEXT: sd a7, 72(sp) +; RV64-NEXT: sd a1, 24(sp) +; RV64-NEXT: sd a2, 32(sp) +; RV64-NEXT: sd a3, 40(sp) +; RV64-NEXT: sd a4, 48(sp) +; RV64-NEXT: addi a1, sp, 32 +; RV64-NEXT: sd a1, 8(sp) +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va2_va_arg: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -80 +; RV64D-LP64F-NEXT: mv a0, a1 +; RV64D-LP64F-NEXT: sd a5, 56(sp) +; RV64D-LP64F-NEXT: sd a6, 64(sp) +; RV64D-LP64F-NEXT: sd a7, 72(sp) +; RV64D-LP64F-NEXT: sd a1, 24(sp) +; RV64D-LP64F-NEXT: sd a2, 32(sp) +; RV64D-LP64F-NEXT: sd a3, 40(sp) +; RV64D-LP64F-NEXT: sd a4, 48(sp) +; RV64D-LP64F-NEXT: addi a1, sp, 32 +; RV64D-LP64F-NEXT: sd a1, 8(sp) +; RV64D-LP64F-NEXT: addi sp, sp, 80 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va2_va_arg: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -80 +; RV64D-LP64D-NEXT: mv a0, a1 +; RV64D-LP64D-NEXT: sd a5, 56(sp) +; RV64D-LP64D-NEXT: sd a6, 64(sp) +; RV64D-LP64D-NEXT: sd a7, 72(sp) +; RV64D-LP64D-NEXT: sd a1, 24(sp) +; RV64D-LP64D-NEXT: sd a2, 32(sp) +; RV64D-LP64D-NEXT: sd a3, 40(sp) +; RV64D-LP64D-NEXT: sd a4, 48(sp) +; RV64D-LP64D-NEXT: addi a1, sp, 32 +; RV64D-LP64D-NEXT: sd a1, 8(sp) +; RV64D-LP64D-NEXT: addi sp, sp, 80 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va2_va_arg: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1277,16 +1497,38 @@ define void @va2_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 8 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va2_caller: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52 -; LP64-LP64F-LP64D-FPELIM-NEXT: call va2 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va2_caller: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64-NEXT: li a1, 1023 +; RV64-NEXT: slli a1, a1, 52 +; RV64-NEXT: call va2 +; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va2_caller: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -16 +; RV64D-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: li a1, 1023 +; RV64D-LP64F-NEXT: slli a1, a1, 52 +; RV64D-LP64F-NEXT: call va2 +; RV64D-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: addi sp, sp, 16 +; 
RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va2_caller: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -16 +; RV64D-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: li a1, 1023 +; RV64D-LP64D-NEXT: slli a1, a1, 52 +; RV64D-LP64D-NEXT: call va2 +; RV64D-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: addi sp, sp, 16 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va2_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1455,20 +1697,50 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 28 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va3: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -64 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, sp, 31 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va3: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -64 +; RV64-NEXT: sd a6, 48(sp) +; RV64-NEXT: sd a7, 56(sp) +; RV64-NEXT: sd a2, 16(sp) +; RV64-NEXT: sd a3, 24(sp) +; RV64-NEXT: sd a4, 32(sp) +; RV64-NEXT: sd a5, 40(sp) +; RV64-NEXT: addi a3, sp, 31 +; RV64-NEXT: add a0, a1, a2 +; RV64-NEXT: sd a3, 8(sp) +; RV64-NEXT: addi sp, sp, 64 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va3: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -64 +; RV64D-LP64F-NEXT: sd a6, 48(sp) +; RV64D-LP64F-NEXT: sd a7, 56(sp) +; RV64D-LP64F-NEXT: sd a2, 16(sp) +; RV64D-LP64F-NEXT: sd a3, 24(sp) +; RV64D-LP64F-NEXT: sd a4, 32(sp) +; RV64D-LP64F-NEXT: sd a5, 40(sp) +; RV64D-LP64F-NEXT: addi a3, sp, 31 +; RV64D-LP64F-NEXT: add a0, a1, a2 +; RV64D-LP64F-NEXT: sd a3, 8(sp) +; RV64D-LP64F-NEXT: addi sp, sp, 64 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va3: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -64 +; RV64D-LP64D-NEXT: sd a6, 48(sp) +; RV64D-LP64D-NEXT: sd a7, 56(sp) +; RV64D-LP64D-NEXT: sd a2, 16(sp) +; RV64D-LP64D-NEXT: sd a3, 24(sp) +; RV64D-LP64D-NEXT: sd a4, 32(sp) +; RV64D-LP64D-NEXT: sd a5, 40(sp) +; RV64D-LP64D-NEXT: addi a3, sp, 31 +; RV64D-LP64D-NEXT: add a0, a1, a2 +; RV64D-LP64D-NEXT: sd a3, 8(sp) +; RV64D-LP64D-NEXT: addi sp, sp, 64 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va3: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1659,20 +1931,50 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) 
nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 28 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va3_va_arg: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -64 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, sp, 24 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va3_va_arg: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -64 +; RV64-NEXT: sd a6, 48(sp) +; RV64-NEXT: sd a7, 56(sp) +; RV64-NEXT: sd a2, 16(sp) +; RV64-NEXT: sd a3, 24(sp) +; RV64-NEXT: sd a4, 32(sp) +; RV64-NEXT: sd a5, 40(sp) +; RV64-NEXT: addi a3, sp, 24 +; RV64-NEXT: add a0, a1, a2 +; RV64-NEXT: sd a3, 8(sp) +; RV64-NEXT: addi sp, sp, 64 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va3_va_arg: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -64 +; RV64D-LP64F-NEXT: sd a6, 48(sp) +; RV64D-LP64F-NEXT: sd a7, 56(sp) +; RV64D-LP64F-NEXT: sd a2, 16(sp) +; RV64D-LP64F-NEXT: sd a3, 24(sp) +; RV64D-LP64F-NEXT: sd a4, 32(sp) +; RV64D-LP64F-NEXT: sd a5, 40(sp) +; RV64D-LP64F-NEXT: addi a3, sp, 24 +; RV64D-LP64F-NEXT: add a0, a1, a2 +; RV64D-LP64F-NEXT: sd a3, 8(sp) +; RV64D-LP64F-NEXT: addi sp, sp, 64 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va3_va_arg: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -64 +; RV64D-LP64D-NEXT: sd a6, 48(sp) +; RV64D-LP64D-NEXT: sd a7, 56(sp) +; RV64D-LP64D-NEXT: sd a2, 16(sp) +; RV64D-LP64D-NEXT: sd a3, 24(sp) +; RV64D-LP64D-NEXT: sd a4, 32(sp) +; RV64D-LP64D-NEXT: sd a5, 40(sp) +; RV64D-LP64D-NEXT: addi a3, sp, 24 +; RV64D-LP64D-NEXT: add a0, a1, a2 +; RV64D-LP64D-NEXT: sd a3, 8(sp) +; RV64D-LP64D-NEXT: addi sp, sp, 64 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va3_va_arg: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1810,18 +2112,44 @@ define void @va3_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 8 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va3_caller: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 1 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 2 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 62 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1111 -; LP64-LP64F-LP64D-FPELIM-NEXT: call va3 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va3_caller: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64-NEXT: li a2, 1 +; RV64-NEXT: li a0, 2 +; RV64-NEXT: slli a2, a2, 62 +; RV64-NEXT: li a1, 1111 +; RV64-NEXT: call va3 +; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va3_caller: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -16 +; RV64D-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: li a2, 1 +; RV64D-LP64F-NEXT: li a0, 2 +; RV64D-LP64F-NEXT: slli a2, a2, 62 +; RV64D-LP64F-NEXT: li a1, 1111 +; RV64D-LP64F-NEXT: call va3 +; RV64D-LP64F-NEXT: ld ra, 8(sp) # 8-byte 
Folded Reload +; RV64D-LP64F-NEXT: addi sp, sp, 16 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va3_caller: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -16 +; RV64D-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: li a2, 1 +; RV64D-LP64D-NEXT: li a0, 2 +; RV64D-LP64D-NEXT: slli a2, a2, 62 +; RV64D-LP64D-NEXT: li a1, 1111 +; RV64D-LP64D-NEXT: call va3 +; RV64D-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: addi sp, sp, 16 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va3_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -2081,46 +2409,128 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 44 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va4_va_copy: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -96 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: mv s0, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 72(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 80(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 88(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 64(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 48 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 0(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead -; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 3 -; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -4 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a0, 8 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a0, 11 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: andi a2, a2, -4 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 0(a0) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a2, 8 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a2, 11 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: andi a3, a3, -4 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld a1, 0(a2) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a3, 8 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: ld a2, 0(a3) -; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a0, s0 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a0, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: addw a0, a0, a2 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 96 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va4_va_copy: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -96 +; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64-NEXT: mv s0, a1 +; RV64-NEXT: sd a5, 72(sp) +; RV64-NEXT: sd a6, 80(sp) +; RV64-NEXT: sd a7, 88(sp) +; RV64-NEXT: sd a1, 40(sp) +; RV64-NEXT: sd a2, 48(sp) +; RV64-NEXT: sd a3, 56(sp) +; RV64-NEXT: sd a4, 64(sp) +; RV64-NEXT: addi a0, sp, 48 +; RV64-NEXT: sd a0, 8(sp) +; RV64-NEXT: sd a0, 0(sp) +; RV64-NEXT: call notdead +; RV64-NEXT: ld a0, 8(sp) +; RV64-NEXT: addi a0, a0, 3 +; RV64-NEXT: andi a0, a0, -4 +; RV64-NEXT: addi a1, a0, 8 +; RV64-NEXT: addi a2, a0, 11 +; RV64-NEXT: sd a1, 8(sp) +; RV64-NEXT: andi a2, a2, -4 +; RV64-NEXT: ld a0, 0(a0) +; RV64-NEXT: addi a1, a2, 8 +; RV64-NEXT: addi a3, a2, 11 +; RV64-NEXT: sd a1, 8(sp) +; RV64-NEXT: andi a3, a3, -4 +; 
RV64-NEXT: ld a1, 0(a2) +; RV64-NEXT: addi a2, a3, 8 +; RV64-NEXT: sd a2, 8(sp) +; RV64-NEXT: ld a2, 0(a3) +; RV64-NEXT: add a0, a0, s0 +; RV64-NEXT: add a0, a0, a1 +; RV64-NEXT: addw a0, a0, a2 +; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 96 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va4_va_copy: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -96 +; RV64D-LP64F-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: mv s0, a1 +; RV64D-LP64F-NEXT: sd a5, 72(sp) +; RV64D-LP64F-NEXT: sd a6, 80(sp) +; RV64D-LP64F-NEXT: sd a7, 88(sp) +; RV64D-LP64F-NEXT: sd a1, 40(sp) +; RV64D-LP64F-NEXT: sd a2, 48(sp) +; RV64D-LP64F-NEXT: sd a3, 56(sp) +; RV64D-LP64F-NEXT: sd a4, 64(sp) +; RV64D-LP64F-NEXT: addi a0, sp, 48 +; RV64D-LP64F-NEXT: sd a0, 8(sp) +; RV64D-LP64F-NEXT: sd a0, 0(sp) +; RV64D-LP64F-NEXT: call notdead +; RV64D-LP64F-NEXT: ld a0, 8(sp) +; RV64D-LP64F-NEXT: addi a0, a0, 3 +; RV64D-LP64F-NEXT: andi a0, a0, -4 +; RV64D-LP64F-NEXT: addi a1, a0, 8 +; RV64D-LP64F-NEXT: addi a2, a0, 11 +; RV64D-LP64F-NEXT: sd a1, 8(sp) +; RV64D-LP64F-NEXT: andi a2, a2, -4 +; RV64D-LP64F-NEXT: ld a0, 0(a0) +; RV64D-LP64F-NEXT: addi a1, a2, 8 +; RV64D-LP64F-NEXT: addi a3, a2, 11 +; RV64D-LP64F-NEXT: sd a1, 8(sp) +; RV64D-LP64F-NEXT: andi a3, a3, -4 +; RV64D-LP64F-NEXT: ld a1, 0(a2) +; RV64D-LP64F-NEXT: addi a2, a3, 8 +; RV64D-LP64F-NEXT: sd a2, 8(sp) +; RV64D-LP64F-NEXT: ld a2, 0(a3) +; RV64D-LP64F-NEXT: add a0, a0, s0 +; RV64D-LP64F-NEXT: add a0, a0, a1 +; RV64D-LP64F-NEXT: addw a0, a0, a2 +; RV64D-LP64F-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: addi sp, sp, 96 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va4_va_copy: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -96 +; RV64D-LP64D-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: mv s0, a1 +; RV64D-LP64D-NEXT: sd a5, 72(sp) +; RV64D-LP64D-NEXT: sd a6, 80(sp) +; RV64D-LP64D-NEXT: sd a7, 88(sp) +; RV64D-LP64D-NEXT: sd a1, 40(sp) +; RV64D-LP64D-NEXT: sd a2, 48(sp) +; RV64D-LP64D-NEXT: sd a3, 56(sp) +; RV64D-LP64D-NEXT: sd a4, 64(sp) +; RV64D-LP64D-NEXT: addi a0, sp, 48 +; RV64D-LP64D-NEXT: sd a0, 8(sp) +; RV64D-LP64D-NEXT: sd a0, 0(sp) +; RV64D-LP64D-NEXT: call notdead +; RV64D-LP64D-NEXT: ld a0, 8(sp) +; RV64D-LP64D-NEXT: addi a0, a0, 3 +; RV64D-LP64D-NEXT: andi a0, a0, -4 +; RV64D-LP64D-NEXT: addi a1, a0, 8 +; RV64D-LP64D-NEXT: addi a2, a0, 11 +; RV64D-LP64D-NEXT: sd a1, 8(sp) +; RV64D-LP64D-NEXT: andi a2, a2, -4 +; RV64D-LP64D-NEXT: ld a0, 0(a0) +; RV64D-LP64D-NEXT: addi a1, a2, 8 +; RV64D-LP64D-NEXT: addi a3, a2, 11 +; RV64D-LP64D-NEXT: sd a1, 8(sp) +; RV64D-LP64D-NEXT: andi a3, a3, -4 +; RV64D-LP64D-NEXT: ld a1, 0(a2) +; RV64D-LP64D-NEXT: addi a2, a3, 8 +; RV64D-LP64D-NEXT: sd a2, 8(sp) +; RV64D-LP64D-NEXT: ld a2, 0(a3) +; RV64D-LP64D-NEXT: add a0, a0, s0 +; RV64D-LP64D-NEXT: add a0, a0, a1 +; RV64D-LP64D-NEXT: addw a0, a0, a2 +; RV64D-LP64D-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: addi sp, sp, 96 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va4_va_copy: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -2501,35 +2911,95 @@ define void @va5_aligned_stack_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 64 ; ILP32E-WITHFP-NEXT: ret ; -; 
LP64-LP64F-LP64D-FPELIM-LABEL: va5_aligned_stack_caller: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -48 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: li t0, 17 -; LP64-LP64F-LP64D-FPELIM-NEXT: li t1, 16 -; LP64-LP64F-LP64D-FPELIM-NEXT: li t2, 15 -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a2, %hi(.LCPI11_0) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a3, %hi(.LCPI11_1) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a6, %hi(.LCPI11_2) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui t3, 2384 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 11 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a4, 12 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a5, 13 -; LP64-LP64F-LP64D-FPELIM-NEXT: li a7, 14 -; LP64-LP64F-LP64D-FPELIM-NEXT: ld t4, %lo(.LCPI11_0)(a2) -; LP64-LP64F-LP64D-FPELIM-NEXT: ld a2, %lo(.LCPI11_1)(a3) -; LP64-LP64F-LP64D-FPELIM-NEXT: ld a3, %lo(.LCPI11_2)(a6) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a6, t3, 761 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a6, a6, 11 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd t4, 0(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd t2, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd t1, 16(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd t0, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: call va5_aligned_stack_callee -; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 48 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va5_aligned_stack_caller: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -48 +; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; RV64-NEXT: li t0, 17 +; RV64-NEXT: li t1, 16 +; RV64-NEXT: li t2, 15 +; RV64-NEXT: lui a2, %hi(.LCPI11_0) +; RV64-NEXT: lui a3, %hi(.LCPI11_1) +; RV64-NEXT: lui a6, %hi(.LCPI11_2) +; RV64-NEXT: lui t3, 2384 +; RV64-NEXT: li a0, 1 +; RV64-NEXT: li a1, 11 +; RV64-NEXT: li a4, 12 +; RV64-NEXT: li a5, 13 +; RV64-NEXT: li a7, 14 +; RV64-NEXT: ld t4, %lo(.LCPI11_0)(a2) +; RV64-NEXT: ld a2, %lo(.LCPI11_1)(a3) +; RV64-NEXT: ld a3, %lo(.LCPI11_2)(a6) +; RV64-NEXT: addi a6, t3, 761 +; RV64-NEXT: slli a6, a6, 11 +; RV64-NEXT: sd t4, 0(sp) +; RV64-NEXT: sd t2, 8(sp) +; RV64-NEXT: sd t1, 16(sp) +; RV64-NEXT: sd t0, 24(sp) +; RV64-NEXT: call va5_aligned_stack_callee +; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; RV64-NEXT: addi sp, sp, 48 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va5_aligned_stack_caller: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -48 +; RV64D-LP64F-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; RV64D-LP64F-NEXT: lui a0, %hi(.promoted_doubles.va5_aligned_stack_caller) +; RV64D-LP64F-NEXT: fld fa5, %lo(.promoted_doubles.va5_aligned_stack_caller)(a0) +; RV64D-LP64F-NEXT: li t0, 17 +; RV64D-LP64F-NEXT: li t1, 16 +; RV64D-LP64F-NEXT: li t2, 15 +; RV64D-LP64F-NEXT: lui a2, %hi(.LCPI11_0) +; RV64D-LP64F-NEXT: lui a3, %hi(.LCPI11_1) +; RV64D-LP64F-NEXT: lui a6, 2384 +; RV64D-LP64F-NEXT: li a0, 1 +; RV64D-LP64F-NEXT: li a1, 11 +; RV64D-LP64F-NEXT: li a4, 12 +; RV64D-LP64F-NEXT: li a5, 13 +; RV64D-LP64F-NEXT: li a7, 14 +; RV64D-LP64F-NEXT: ld a2, %lo(.LCPI11_0)(a2) +; RV64D-LP64F-NEXT: ld a3, %lo(.LCPI11_1)(a3) +; RV64D-LP64F-NEXT: addi a6, a6, 761 +; RV64D-LP64F-NEXT: slli a6, a6, 11 +; RV64D-LP64F-NEXT: fsd fa5, 0(sp) +; RV64D-LP64F-NEXT: sd t2, 8(sp) +; RV64D-LP64F-NEXT: sd t1, 16(sp) +; RV64D-LP64F-NEXT: sd t0, 24(sp) +; RV64D-LP64F-NEXT: call va5_aligned_stack_callee +; RV64D-LP64F-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; RV64D-LP64F-NEXT: addi sp, sp, 48 +; RV64D-LP64F-NEXT: ret +; +; 
RV64D-LP64D-LABEL: va5_aligned_stack_caller: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -48 +; RV64D-LP64D-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; RV64D-LP64D-NEXT: lui a0, %hi(.promoted_doubles.va5_aligned_stack_caller) +; RV64D-LP64D-NEXT: fld fa5, %lo(.promoted_doubles.va5_aligned_stack_caller)(a0) +; RV64D-LP64D-NEXT: li t0, 17 +; RV64D-LP64D-NEXT: li t1, 16 +; RV64D-LP64D-NEXT: li t2, 15 +; RV64D-LP64D-NEXT: lui a2, %hi(.LCPI11_0) +; RV64D-LP64D-NEXT: lui a3, %hi(.LCPI11_1) +; RV64D-LP64D-NEXT: lui a6, 2384 +; RV64D-LP64D-NEXT: li a0, 1 +; RV64D-LP64D-NEXT: li a1, 11 +; RV64D-LP64D-NEXT: li a4, 12 +; RV64D-LP64D-NEXT: li a5, 13 +; RV64D-LP64D-NEXT: li a7, 14 +; RV64D-LP64D-NEXT: ld a2, %lo(.LCPI11_0)(a2) +; RV64D-LP64D-NEXT: ld a3, %lo(.LCPI11_1)(a3) +; RV64D-LP64D-NEXT: addi a6, a6, 761 +; RV64D-LP64D-NEXT: slli a6, a6, 11 +; RV64D-LP64D-NEXT: fsd fa5, 0(sp) +; RV64D-LP64D-NEXT: sd t2, 8(sp) +; RV64D-LP64D-NEXT: sd t1, 16(sp) +; RV64D-LP64D-NEXT: sd t0, 24(sp) +; RV64D-LP64D-NEXT: call va5_aligned_stack_callee +; RV64D-LP64D-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; RV64D-LP64D-NEXT: addi sp, sp, 48 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va5_aligned_stack_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -2727,21 +3197,53 @@ define i32 @va6_no_fixed_args(...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va6_no_fixed_args: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 16(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 24 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va6_no_fixed_args: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -80 +; RV64-NEXT: sd a4, 48(sp) +; RV64-NEXT: sd a5, 56(sp) +; RV64-NEXT: sd a6, 64(sp) +; RV64-NEXT: sd a7, 72(sp) +; RV64-NEXT: sd a0, 16(sp) +; RV64-NEXT: sd a1, 24(sp) +; RV64-NEXT: sd a2, 32(sp) +; RV64-NEXT: sd a3, 40(sp) +; RV64-NEXT: addi a1, sp, 24 +; RV64-NEXT: sd a1, 8(sp) +; RV64-NEXT: addi sp, sp, 80 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va6_no_fixed_args: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: addi sp, sp, -80 +; RV64D-LP64F-NEXT: sd a4, 48(sp) +; RV64D-LP64F-NEXT: sd a5, 56(sp) +; RV64D-LP64F-NEXT: sd a6, 64(sp) +; RV64D-LP64F-NEXT: sd a7, 72(sp) +; RV64D-LP64F-NEXT: sd a0, 16(sp) +; RV64D-LP64F-NEXT: sd a1, 24(sp) +; RV64D-LP64F-NEXT: sd a2, 32(sp) +; RV64D-LP64F-NEXT: sd a3, 40(sp) +; RV64D-LP64F-NEXT: addi a1, sp, 24 +; RV64D-LP64F-NEXT: sd a1, 8(sp) +; RV64D-LP64F-NEXT: addi sp, sp, 80 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va6_no_fixed_args: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: addi sp, sp, -80 +; RV64D-LP64D-NEXT: sd a4, 48(sp) +; RV64D-LP64D-NEXT: sd a5, 56(sp) +; RV64D-LP64D-NEXT: sd a6, 64(sp) +; RV64D-LP64D-NEXT: sd a7, 72(sp) +; RV64D-LP64D-NEXT: sd a0, 16(sp) +; RV64D-LP64D-NEXT: sd a1, 24(sp) +; RV64D-LP64D-NEXT: sd a2, 32(sp) +; RV64D-LP64D-NEXT: sd a3, 40(sp) +; RV64D-LP64D-NEXT: addi a1, sp, 24 +; RV64D-LP64D-NEXT: sd a1, 8(sp) +; RV64D-LP64D-NEXT: addi sp, sp, 80 +; RV64D-LP64D-NEXT: ret 
; ; LP64-LP64F-LP64D-WITHFP-LABEL: va6_no_fixed_args: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -2986,45 +3488,125 @@ define i32 @va_large_stack(ptr %fmt, ...) { ; ILP32E-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; ILP32E-WITHFP-NEXT: ret ; -; LP64-LP64F-LP64D-FPELIM-LABEL: va_large_stack: -; LP64-LP64F-LP64D-FPELIM: # %bb.0: -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 336 -; LP64-LP64F-LP64D-FPELIM-NEXT: sub sp, sp, a0 -; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 100000080 -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, sp, a0 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 280(a0) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 284 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, sp, a0 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, sp, a0 -; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 280(a0) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 312(a1) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 320(a1) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 328(a1) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 288(a1) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 296(a1) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 304(a1) -; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, 336 -; LP64-LP64F-LP64D-FPELIM-NEXT: add sp, sp, a1 -; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 -; LP64-LP64F-LP64D-FPELIM-NEXT: ret +; RV64-LABEL: va_large_stack: +; RV64: # %bb.0: +; RV64-NEXT: lui a0, 24414 +; RV64-NEXT: addi a0, a0, 336 +; RV64-NEXT: sub sp, sp, a0 +; RV64-NEXT: .cfi_def_cfa_offset 100000080 +; RV64-NEXT: lui a0, 24414 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: sd a1, 280(a0) +; RV64-NEXT: lui a0, 24414 +; RV64-NEXT: addi a0, a0, 284 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: sd a0, 8(sp) +; RV64-NEXT: lui a0, 24414 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: lw a0, 280(a0) +; RV64-NEXT: lui a1, 24414 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: sd a5, 312(a1) +; RV64-NEXT: lui a1, 24414 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: sd a6, 320(a1) +; RV64-NEXT: lui a1, 24414 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: sd a7, 328(a1) +; RV64-NEXT: lui a1, 24414 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: sd a2, 288(a1) +; RV64-NEXT: lui a1, 24414 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: sd a3, 296(a1) +; RV64-NEXT: lui a1, 24414 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: sd a4, 304(a1) +; RV64-NEXT: lui a1, 24414 +; RV64-NEXT: addi a1, a1, 336 +; RV64-NEXT: add sp, sp, a1 +; RV64-NEXT: .cfi_def_cfa_offset 0 +; RV64-NEXT: ret +; +; RV64D-LP64F-LABEL: va_large_stack: +; RV64D-LP64F: # %bb.0: +; RV64D-LP64F-NEXT: lui a0, 24414 +; RV64D-LP64F-NEXT: addi a0, a0, 336 +; RV64D-LP64F-NEXT: sub sp, sp, a0 +; RV64D-LP64F-NEXT: .cfi_def_cfa_offset 100000080 +; RV64D-LP64F-NEXT: lui a0, 24414 +; RV64D-LP64F-NEXT: add a0, sp, a0 +; RV64D-LP64F-NEXT: sd a1, 
280(a0) +; RV64D-LP64F-NEXT: lui a0, 24414 +; RV64D-LP64F-NEXT: addi a0, a0, 284 +; RV64D-LP64F-NEXT: add a0, sp, a0 +; RV64D-LP64F-NEXT: sd a0, 8(sp) +; RV64D-LP64F-NEXT: lui a0, 24414 +; RV64D-LP64F-NEXT: add a0, sp, a0 +; RV64D-LP64F-NEXT: lw a0, 280(a0) +; RV64D-LP64F-NEXT: lui a1, 24414 +; RV64D-LP64F-NEXT: add a1, sp, a1 +; RV64D-LP64F-NEXT: sd a5, 312(a1) +; RV64D-LP64F-NEXT: lui a1, 24414 +; RV64D-LP64F-NEXT: add a1, sp, a1 +; RV64D-LP64F-NEXT: sd a6, 320(a1) +; RV64D-LP64F-NEXT: lui a1, 24414 +; RV64D-LP64F-NEXT: add a1, sp, a1 +; RV64D-LP64F-NEXT: sd a7, 328(a1) +; RV64D-LP64F-NEXT: lui a1, 24414 +; RV64D-LP64F-NEXT: add a1, sp, a1 +; RV64D-LP64F-NEXT: sd a2, 288(a1) +; RV64D-LP64F-NEXT: lui a1, 24414 +; RV64D-LP64F-NEXT: add a1, sp, a1 +; RV64D-LP64F-NEXT: sd a3, 296(a1) +; RV64D-LP64F-NEXT: lui a1, 24414 +; RV64D-LP64F-NEXT: add a1, sp, a1 +; RV64D-LP64F-NEXT: sd a4, 304(a1) +; RV64D-LP64F-NEXT: lui a1, 24414 +; RV64D-LP64F-NEXT: addi a1, a1, 336 +; RV64D-LP64F-NEXT: add sp, sp, a1 +; RV64D-LP64F-NEXT: .cfi_def_cfa_offset 0 +; RV64D-LP64F-NEXT: ret +; +; RV64D-LP64D-LABEL: va_large_stack: +; RV64D-LP64D: # %bb.0: +; RV64D-LP64D-NEXT: lui a0, 24414 +; RV64D-LP64D-NEXT: addi a0, a0, 336 +; RV64D-LP64D-NEXT: sub sp, sp, a0 +; RV64D-LP64D-NEXT: .cfi_def_cfa_offset 100000080 +; RV64D-LP64D-NEXT: lui a0, 24414 +; RV64D-LP64D-NEXT: add a0, sp, a0 +; RV64D-LP64D-NEXT: sd a1, 280(a0) +; RV64D-LP64D-NEXT: lui a0, 24414 +; RV64D-LP64D-NEXT: addi a0, a0, 284 +; RV64D-LP64D-NEXT: add a0, sp, a0 +; RV64D-LP64D-NEXT: sd a0, 8(sp) +; RV64D-LP64D-NEXT: lui a0, 24414 +; RV64D-LP64D-NEXT: add a0, sp, a0 +; RV64D-LP64D-NEXT: lw a0, 280(a0) +; RV64D-LP64D-NEXT: lui a1, 24414 +; RV64D-LP64D-NEXT: add a1, sp, a1 +; RV64D-LP64D-NEXT: sd a5, 312(a1) +; RV64D-LP64D-NEXT: lui a1, 24414 +; RV64D-LP64D-NEXT: add a1, sp, a1 +; RV64D-LP64D-NEXT: sd a6, 320(a1) +; RV64D-LP64D-NEXT: lui a1, 24414 +; RV64D-LP64D-NEXT: add a1, sp, a1 +; RV64D-LP64D-NEXT: sd a7, 328(a1) +; RV64D-LP64D-NEXT: lui a1, 24414 +; RV64D-LP64D-NEXT: add a1, sp, a1 +; RV64D-LP64D-NEXT: sd a2, 288(a1) +; RV64D-LP64D-NEXT: lui a1, 24414 +; RV64D-LP64D-NEXT: add a1, sp, a1 +; RV64D-LP64D-NEXT: sd a3, 296(a1) +; RV64D-LP64D-NEXT: lui a1, 24414 +; RV64D-LP64D-NEXT: add a1, sp, a1 +; RV64D-LP64D-NEXT: sd a4, 304(a1) +; RV64D-LP64D-NEXT: lui a1, 24414 +; RV64D-LP64D-NEXT: addi a1, a1, 336 +; RV64D-LP64D-NEXT: add sp, sp, a1 +; RV64D-LP64D-NEXT: .cfi_def_cfa_offset 0 +; RV64D-LP64D-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va_large_stack: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -3141,3 +3723,5 @@ define i32 @va_large_stack(ptr %fmt, ...) { call void @llvm.va_end(ptr %va) ret i32 %1 } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line:
+; LP64-LP64F-LP64D-FPELIM: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
index c561b6ddb1add..567db1c85c594 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
@@ -203,8 +203,8 @@ define void @foo6(ptr %p, double %d) nounwind {
 ;
 ; RV64ZDINX-LABEL: foo6:
 ; RV64ZDINX:       # %bb.0: # %entry
-; RV64ZDINX-NEXT:    lui a2, %hi(.LCPI5_0)
-; RV64ZDINX-NEXT:    ld a2, %lo(.LCPI5_0)(a2)
+; RV64ZDINX-NEXT:    lui a2, %hi(.promoted_doubles.foo6)
+; RV64ZDINX-NEXT:    ld a2, %lo(.promoted_doubles.foo6)(a2)
 ; RV64ZDINX-NEXT:    fadd.d a1, a1, a2
 ; RV64ZDINX-NEXT:    sd a1, 2044(a0)
 ; RV64ZDINX-NEXT:    ret

From a25a17d603548f9fdffde2224fda38399d15f23e Mon Sep 17 00:00:00 2001
From: Alex Bradbury
Date: Wed, 24 Sep 2025 15:48:00 +0100
Subject: [PATCH 2/5] formatting

---
 llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
index 545ab47857e9c..44a51eddc8057 100644
--- a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
+++ b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
@@ -11,8 +11,8 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
@@ -46,7 +46,7 @@ class RISCVPromoteConstant : public ModulePass {
   StringRef getPassName() const override { return "RISC-V Promote Constant"; }
 
-  void getAnalysisUsage(AnalysisUsage &AU) const override { 
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<TargetPassConfig>();
     AU.setPreservesCFG();
   }
@@ -81,7 +81,8 @@ ModulePass *llvm::createRISCVPromoteConstantPass() {
   return new RISCVPromoteConstant();
 }
 
-bool RISCVPromoteConstant::runOnFunction(Function &F, const RISCVTargetLowering *TLI) {
+bool RISCVPromoteConstant::runOnFunction(Function &F,
+                                         const RISCVTargetLowering *TLI) {
   // Bail out and make no transformation if the target doesn't support
   // doubles, or if we're not targeting RV64 as we currently see some
   // regressions for those targets.
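// NOTE: A minimal sketch of the intended transformation (illustrative only:
// the function name `f` below is hypothetical, and the pass's checked output
// lives in riscv-promote-constant.ll, added later in this series). Two double
// literals that would otherwise each need a constant pool access, such as
//
//   %add1 = fadd double %a, 2.718000e+00
//   %add2 = fadd double %b, 4.210000e+01
//
// are instead loaded from a single internal per-function array, so all
// accesses can share one base address:
//
//   @.promoted_doubles.f = internal constant [2 x double]
//       [double 2.718000e+00, double 4.210000e+01]
//   ...
//   %double.addr = getelementptr inbounds [2 x double],
//       ptr @.promoted_doubles.f, i64 0, i64 1
//   %double.val = load double, ptr %double.addr
//   %add2 = fadd double %b, %double.val
//
// Trivially materialisable immediates (e.g. 1.0) and float constants are
// deliberately left alone.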
@@ -102,7 +103,8 @@ bool RISCVPromoteConstant::runOnFunction(Function &F, const RISCVTargetLowering
     for (Use &U : I.operands()) {
       if (auto *C = dyn_cast<ConstantFP>(U.get())) {
         if (C->getType()->isDoubleTy()) {
-          if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64, /*ForCodeSize*/ false))
+          if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64,
+                                /*ForCodeSize*/ false))
             continue;
           UsesInFunc[C].push_back(&U);
           if (ConstantMap.find(C) == ConstantMap.end()) {
@@ -122,7 +124,8 @@ bool RISCVPromoteConstant::runOnFunction(Function &F, const RISCVTargetLowering
       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
         if (auto *C = dyn_cast<ConstantFP>(PN.getIncomingValue(i))) {
           if (C->getType()->isDoubleTy()) {
-            if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64, /*ForCodeSize*/ false))
+            if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64,
+                                  /*ForCodeSize*/ false))
               continue;
             UsesInFunc[C].push_back(&PN.getOperandUse(i));
             if (ConstantMap.find(C) == ConstantMap.end()) {
@@ -144,7 +147,8 @@ bool RISCVPromoteConstant::runOnFunction(Function &F, const RISCVTargetLowering
   Module *M = F.getParent();
   Type *DoubleTy = Type::getDoubleTy(M->getContext());
   ArrayType *ArrayTy = ArrayType::get(DoubleTy, ConstantVector.size());
-  Constant *GlobalArrayInitializer = ConstantArray::get(ArrayTy, ConstantVector);
+  Constant *GlobalArrayInitializer =
+      ConstantArray::get(ArrayTy, ConstantVector);
   auto *GlobalArray = new GlobalVariable(
       *M, ArrayTy,

From 99f5483363d7a6dcfc6d94990ab82e27cba16fd4 Mon Sep 17 00:00:00 2001
From: Alex Bradbury
Date: Wed, 1 Oct 2025 15:14:52 +0100
Subject: [PATCH 3/5] Address review comments

---
 llvm/lib/Target/RISCV/RISCV.h                 |   1 +
 .../lib/Target/RISCV/RISCVPromoteConstant.cpp | 119 ++++++--------
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |   1 +
 llvm/test/CodeGen/RISCV/O3-pipeline.ll        |   2 +-
 llvm/test/CodeGen/RISCV/double-imm.ll         |   8 +-
 llvm/test/CodeGen/RISCV/double-zfa.ll         |  32 ++--
 .../CodeGen/RISCV/riscv-promote-constant.ll   | 148 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/vararg.ll             |  28 ++--
 .../CodeGen/RISCV/zdinx-boundary-check.ll     |   4 +-
 9 files changed, 231 insertions(+), 112 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/riscv-promote-constant.ll

diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index 5b0ce521409ad..d43b192d9e5c7 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -113,6 +113,7 @@ FunctionPass *createRISCVPreLegalizerCombiner();
 void initializeRISCVPreLegalizerCombinerPass(PassRegistry &);
 
 ModulePass *createRISCVPromoteConstantPass();
+void initializeRISCVPromoteConstantPass(PassRegistry &);
 
 FunctionPass *createRISCVVLOptimizerPass();
 void initializeRISCVVLOptimizerPass(PassRegistry &);
diff --git a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
index 44a51eddc8057..561269c33d256 100644
--- a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
+++ b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
@@ -33,9 +33,10 @@ using namespace llvm;
 
 #define DEBUG_TYPE "riscv-promote-const"
+#define RISCV_PROMOTE_CONSTANT_NAME "RISC-V Promote Constants"
 
-STATISTIC(NumPromoted, "Number of promoted constants");
-STATISTIC(NumPromotedUses, "Number of promoted constants uses");
+STATISTIC(NumPromoted, "Number of constant literals promoted to globals");
+STATISTIC(NumPromotedUses, "Number of uses of promoted literal constants");
 
 namespace {
 
@@ -44,7 +45,7 @@ class RISCVPromoteConstant : public ModulePass {
   static char ID;
   RISCVPromoteConstant() : ModulePass(ID) {}
 
-  StringRef getPassName() const override { return "RISC-V Promote Constant"; }
return "RISC-V Promote Constant"; } + StringRef getPassName() const override { return RISCV_PROMOTE_CONSTANT_NAME; } void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); @@ -54,18 +55,16 @@ class RISCVPromoteConstant : public ModulePass { /// Iterate over the functions and promote the double fp constants that /// would otherwise go into the constant pool to a constant array. bool runOnModule(Module &M) override { - LLVM_DEBUG(dbgs() << getPassName() << '\n'); - // TargetMachine and Subtarget are needed to query isFPImmlegal. Get them - // from TargetPassConfig. + // TargetMachine and Subtarget are needed to query isFPImmlegal. const TargetPassConfig &TPC = getAnalysis(); const TargetMachine &TM = TPC.getTM(); if (skipModule(M)) return false; bool Changed = false; - for (auto &MF : M) { - const RISCVSubtarget &ST = TM.getSubtarget(MF); + for (Function &F : M) { + const RISCVSubtarget &ST = TM.getSubtarget(F); const RISCVTargetLowering *TLI = ST.getTargetLowering(); - Changed |= runOnFunction(MF, TLI); + Changed |= runOnFunction(F, TLI); } return Changed; } @@ -77,75 +76,61 @@ class RISCVPromoteConstant : public ModulePass { char RISCVPromoteConstant::ID = 0; +INITIALIZE_PASS(RISCVPromoteConstant, DEBUG_TYPE, RISCV_PROMOTE_CONSTANT_NAME, + false, false) + ModulePass *llvm::createRISCVPromoteConstantPass() { return new RISCVPromoteConstant(); } bool RISCVPromoteConstant::runOnFunction(Function &F, const RISCVTargetLowering *TLI) { + if (F.hasOptNone()) + return false; + // Bail out and make no transformation if the target doesn't support // doubles, or if we're not targeting RV64 as we currently see some // regressions for those targets. if (!TLI->isTypeLegal(MVT::f64) || !TLI->isTypeLegal(MVT::i64)) return false; - // Collect all unique double constants used in the function, and track their - // offset within the newly created global array. Also track uses that will - // be replaced later. - DenseMap ConstantMap; - SmallVector ConstantVector; - DenseMap> UsesInFunc; + // Collect all unique double constants and their uses in the function. Use + // MapVector to preserve insertion order. + MapVector> ConstUsesMap; for (Instruction &I : instructions(F)) { - // PHI nodes are handled specially in a second loop below. - if (isa(I)) - continue; for (Use &U : I.operands()) { if (auto *C = dyn_cast(U.get())) { - if (C->getType()->isDoubleTy()) { - if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64, - /*ForCodeSize*/ false)) - continue; - UsesInFunc[C].push_back(&U); - if (ConstantMap.find(C) == ConstantMap.end()) { - ConstantMap[C] = ConstantVector.size(); - ConstantVector.push_back(C); - ++NumPromoted; - } - } + if (!C->getType()->isDoubleTy()) + continue; + if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64, + /*ForCodeSize=*/false)) + continue; + ConstUsesMap[C].push_back(&U); } } } - // Collect uses from PHI nodes after other uses, because when transforming - // the function, we handle PHI uses afterwards. 
-  for (BasicBlock &BB : F) {
-    for (PHINode &PN : BB.phis()) {
-      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
-        if (auto *C = dyn_cast<ConstantFP>(PN.getIncomingValue(i))) {
-          if (C->getType()->isDoubleTy()) {
-            if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64,
-                                  /*ForCodeSize*/ false))
-              continue;
-            UsesInFunc[C].push_back(&PN.getOperandUse(i));
-            if (ConstantMap.find(C) == ConstantMap.end()) {
-              ConstantMap[C] = ConstantVector.size();
-              ConstantVector.push_back(C);
-              ++NumPromoted;
-            }
-          }
-        }
-      }
-    }
+  int PromotableConstants = ConstUsesMap.size();
+  LLVM_DEBUG(dbgs() << "Found " << PromotableConstants
+                    << " promotable constants in " << F.getName() << "\n");
+  // Bail out if no promotable constants found, or if only one is found.
+  if (PromotableConstants < 2) {
+    LLVM_DEBUG(dbgs() << "Performing no promotions as insufficient promotable "
+                         "constants found\n");
+    return false;
   }
 
-  // Bail out if no promotable constants found.
-  if (ConstantVector.empty())
-    return false;
+  NumPromoted += PromotableConstants;
 
   // Create a global array containing the promoted constants.
   Module *M = F.getParent();
   Type *DoubleTy = Type::getDoubleTy(M->getContext());
+
+  SmallVector<Constant *, 16> ConstantVector;
+  for (auto const &Pair : ConstUsesMap)
+    ConstantVector.push_back(Pair.first);
+
   ArrayType *ArrayTy = ArrayType::get(DoubleTy, ConstantVector.size());
   Constant *GlobalArrayInitializer =
       ConstantArray::get(ArrayTy, ConstantVector);
@@ -155,36 +140,31 @@ bool RISCVPromoteConstant::runOnFunction(Function &F,
       /*isConstant=*/true, GlobalValue::InternalLinkage, GlobalArrayInitializer,
       ".promoted_doubles." + F.getName());
 
-  // Create GEP for the base pointer in the function entry.
-  IRBuilder<> EntryBuilder(&F.getEntryBlock().front());
-  Value *BasePtr = EntryBuilder.CreateConstInBoundsGEP2_64(
-      GlobalArray->getValueType(), GlobalArray, 0, 0, "doubles.base");
-
   // A cache to hold the loaded value for a given constant within a basic block.
   DenseMap<std::pair<Constant *, BasicBlock *>, Value *> LocalLoads;
 
   // Replace all uses with the loaded value.
-  for (Constant *ConstVal : ConstantVector) {
-    auto *Const = cast<ConstantFP>(ConstVal);
-    const auto &Uses = UsesInFunc.at(Const);
-    unsigned Idx = ConstantMap.at(Const);
+  unsigned Idx = 0;
+  for (auto const &Pair : ConstUsesMap) {
+    ConstantFP *Const = Pair.first;
+    const SmallVector<Use *, 8> &Uses = Pair.second;
     for (Use *U : Uses) {
       Instruction *UserInst = cast<Instruction>(U->getUser());
       BasicBlock *InsertionBB;
-      Instruction *InsertionPt;
+      BasicBlock::iterator InsertionPt;
 
       if (auto *PN = dyn_cast<PHINode>(UserInst)) {
         // If the user is a PHI node, we must insert the load in the
-        // corresponding predecessor basic block, before its terminator.
+        // corresponding predecessor basic block.
        unsigned OperandIdx = U->getOperandNo();
         InsertionBB = PN->getIncomingBlock(OperandIdx);
-        InsertionPt = InsertionBB->getTerminator();
       } else {
-        // For any other instruction, we can insert the load right before it.
         InsertionBB = UserInst->getParent();
-        InsertionPt = UserInst;
       }
+      // It is always safe to insert in the first insertion point in the BB,
+      // so do that and let other passes reorder.
+      InsertionPt = InsertionBB->getFirstInsertionPt();
 
       auto CacheKey = std::make_pair(Const, InsertionBB);
      Value *LoadedVal = nullptr;
@@ -194,9 +174,9 @@ bool RISCVPromoteConstant::runOnFunction(Function &F,
         LoadedVal = LocalLoads.at(CacheKey);
       } else {
         // Otherwise, create a new GEP and Load at the correct insertion point.
- IRBuilder<> Builder(InsertionPt); - Value *ElementPtr = Builder.CreateConstInBoundsGEP1_64( - DoubleTy, BasePtr, Idx, "double.addr"); + IRBuilder<> Builder(InsertionBB, InsertionPt); + Value *ElementPtr = Builder.CreateConstInBoundsGEP2_64( + GlobalArray->getValueType(), GlobalArray, 0, Idx, "double.addr"); LoadedVal = Builder.CreateLoad(DoubleTy, ElementPtr, "double.val"); // Cache the newly created load for this block. @@ -206,6 +186,7 @@ bool RISCVPromoteConstant::runOnFunction(Function &F, U->set(LoadedVal); ++NumPromotedUses; } + ++Idx; } return true; diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp index 1c0f4d3ad041b..0551887430869 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -139,6 +139,7 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() { initializeRISCVExpandAtomicPseudoPass(*PR); initializeRISCVRedundantCopyEliminationPass(*PR); initializeRISCVAsmPrinterPass(*PR); + initializeRISCVPromoteConstantPass(*PR); } static Reloc::Model getEffectiveRelocModel(const Triple &TT, diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll index a982de8301e2c..769823d1c4216 100644 --- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll +++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll @@ -75,7 +75,7 @@ ; CHECK-NEXT: CodeGen Prepare ; CHECK-NEXT: Dominator Tree Construction ; CHECK-NEXT: Exception handling preparation -; CHECK-NEXT: RISC-V Promote Constant +; CHECK-NEXT: RISC-V Promote Constants ; CHECK-NEXT: A No-Op Barrier Pass ; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Merge internal globals diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll index 3d1b0d8cc9658..6f7c30edba3ea 100644 --- a/llvm/test/CodeGen/RISCV/double-imm.ll +++ b/llvm/test/CodeGen/RISCV/double-imm.ll @@ -17,8 +17,8 @@ define double @double_imm() nounwind { ; ; CHECK64D-LABEL: double_imm: ; CHECK64D: # %bb.0: -; CHECK64D-NEXT: lui a0, %hi(.promoted_doubles.double_imm) -; CHECK64D-NEXT: fld fa0, %lo(.promoted_doubles.double_imm)(a0) +; CHECK64D-NEXT: lui a0, %hi(.LCPI0_0) +; CHECK64D-NEXT: fld fa0, %lo(.LCPI0_0)(a0) ; CHECK64D-NEXT: ret ; ; CHECKRV32ZDINX-LABEL: double_imm: @@ -31,8 +31,8 @@ define double @double_imm() nounwind { ; ; CHECKRV64ZDINX-LABEL: double_imm: ; CHECKRV64ZDINX: # %bb.0: -; CHECKRV64ZDINX-NEXT: lui a0, %hi(.promoted_doubles.double_imm) -; CHECKRV64ZDINX-NEXT: ld a0, %lo(.promoted_doubles.double_imm)(a0) +; CHECKRV64ZDINX-NEXT: lui a0, %hi(.LCPI0_0) +; CHECKRV64ZDINX-NEXT: ld a0, %lo(.LCPI0_0)(a0) ; CHECKRV64ZDINX-NEXT: ret ret double 3.1415926535897931159979634685441851615905761718750 } diff --git a/llvm/test/CodeGen/RISCV/double-zfa.ll b/llvm/test/CodeGen/RISCV/double-zfa.ll index 89f8a8dddb50c..f17c63ddb6cae 100644 --- a/llvm/test/CodeGen/RISCV/double-zfa.ll +++ b/llvm/test/CodeGen/RISCV/double-zfa.ll @@ -103,33 +103,21 @@ define double @loadfpimm10() { ; Negative test. This is a qnan with payload of 1. 
define double @loadfpimm11() { -; RV32IDZFA-LABEL: loadfpimm11: -; RV32IDZFA: # %bb.0: -; RV32IDZFA-NEXT: lui a0, %hi(.LCPI10_0) -; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI10_0)(a0) -; RV32IDZFA-NEXT: ret -; -; RV64DZFA-LABEL: loadfpimm11: -; RV64DZFA: # %bb.0: -; RV64DZFA-NEXT: lui a0, %hi(.promoted_doubles.loadfpimm11) -; RV64DZFA-NEXT: fld fa0, %lo(.promoted_doubles.loadfpimm11)(a0) -; RV64DZFA-NEXT: ret +; CHECK-LABEL: loadfpimm11: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, %hi(.LCPI10_0) +; CHECK-NEXT: fld fa0, %lo(.LCPI10_0)(a0) +; CHECK-NEXT: ret ret double 0x7ff8000000000001 } ; Negative test. This is an snan with payload of 1. define double @loadfpimm12() { -; RV32IDZFA-LABEL: loadfpimm12: -; RV32IDZFA: # %bb.0: -; RV32IDZFA-NEXT: lui a0, %hi(.LCPI11_0) -; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI11_0)(a0) -; RV32IDZFA-NEXT: ret -; -; RV64DZFA-LABEL: loadfpimm12: -; RV64DZFA: # %bb.0: -; RV64DZFA-NEXT: lui a0, %hi(.promoted_doubles.loadfpimm12) -; RV64DZFA-NEXT: fld fa0, %lo(.promoted_doubles.loadfpimm12)(a0) -; RV64DZFA-NEXT: ret +; CHECK-LABEL: loadfpimm12: +; CHECK: # %bb.0: +; CHECK-NEXT: lui a0, %hi(.LCPI11_0) +; CHECK-NEXT: fld fa0, %lo(.LCPI11_0)(a0) +; CHECK-NEXT: ret ret double 0x7ff0000000000001 } diff --git a/llvm/test/CodeGen/RISCV/riscv-promote-constant.ll b/llvm/test/CodeGen/RISCV/riscv-promote-constant.ll new file mode 100644 index 0000000000000..2bde6013b3640 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/riscv-promote-constant.ll @@ -0,0 +1,148 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6 +; RUN: opt %s -S -riscv-promote-const -mtriple=riscv64 -mattr=+d | FileCheck %s + +; No promotion should take place, as the pass skips floats. +define float @multiple_floats(float %a, float %b) { +; CHECK-LABEL: define float @multiple_floats( +; CHECK-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[ADD1:%.*]] = fadd float [[A]], 1.000000e+00 +; CHECK-NEXT: [[ADD2:%.*]] = fadd float [[B]], 2.000000e+00 +; CHECK-NEXT: [[SUM_F:%.*]] = fadd float [[ADD1]], [[ADD2]] +; CHECK-NEXT: ret float [[SUM_F]] +; +entry: + %add1 = fadd float %a, 1.0 + %add2 = fadd float %b, 2.0 + %sum_f = fadd float %add1, %add2 + ret float %sum_f +} + +; No promotion should take place as cases with a single constant are skipped. +define double @single_double(double %a) { +; CHECK-LABEL: define double @single_double( +; CHECK-SAME: double [[A:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[ADD:%.*]] = fadd double [[A]], 4.210000e+01 +; CHECK-NEXT: ret double [[ADD]] +; +entry: + %add = fadd double %a, 42.1 + ret double %add +} + +; Promotion should happen as we have at least two unique constants that would +; otherwise go in the constant pool. 
+define double @multiple_doubles(double %a, double %b) { +; CHECK-LABEL: define double @multiple_doubles( +; CHECK-SAME: double [[A:%.*]], double [[B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[DOUBLE_VAL1:%.*]] = load double, ptr getelementptr inbounds ([2 x double], ptr @.promoted_doubles.multiple_doubles, i64 0, i64 1), align 8 +; CHECK-NEXT: [[ADD3:%.*]] = load double, ptr @.promoted_doubles.multiple_doubles, align 8 +; CHECK-NEXT: [[ADD2:%.*]] = fadd double [[A]], [[ADD3]] +; CHECK-NEXT: [[ADD4:%.*]] = fadd double [[B]], [[DOUBLE_VAL1]] +; CHECK-NEXT: [[SUM:%.*]] = fadd double [[ADD2]], [[ADD3]] +; CHECK-NEXT: [[SUM1:%.*]] = fadd double [[ADD4]], [[SUM]] +; CHECK-NEXT: ret double [[SUM1]] +; +entry: + %add1 = fadd double %a, 2.718 + %add2 = fadd double %b, 42.1 + %add3 = fadd double %add1, 2.718 + %sum = fadd double %add2, %add3 + ret double %sum +} + +; Promotion should not happen as the constants will be materialised rather +; than using the constant pool. +define double @multiple_doubles_no_promote(double %a, double %b) { +; CHECK-LABEL: define double @multiple_doubles_no_promote( +; CHECK-SAME: double [[A:%.*]], double [[B:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[ADD1:%.*]] = fadd double [[A]], 1.000000e+00 +; CHECK-NEXT: [[ADD2:%.*]] = fadd double [[B]], 2.000000e+00 +; CHECK-NEXT: [[ADD3:%.*]] = fadd double [[ADD1]], 1.000000e+00 +; CHECK-NEXT: [[SUM:%.*]] = fadd double [[ADD2]], [[ADD3]] +; CHECK-NEXT: ret double [[SUM]] +; +entry: + %add1 = fadd double %a, 1.0 + %add2 = fadd double %b, 2.0 + %add3 = fadd double %add1, 1.0 + %sum = fadd double %add2, %add3 + ret double %sum +} + +; The same constant shouldn't be loaded more than once per BB. +define double @multiple_doubles_multi_bb(double %a, i1 %cond) { +; CHECK-LABEL: define double @multiple_doubles_multi_bb( +; CHECK-SAME: double [[A:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: br i1 [[COND]], label %[[IF_TRUE:.*]], label %[[IF_FALSE:.*]] +; CHECK: [[IF_TRUE]]: +; CHECK-NEXT: [[DOUBLE_VAL2:%.*]] = load double, ptr getelementptr inbounds ([2 x double], ptr @.promoted_doubles.multiple_doubles_multi_bb, i64 0, i64 1), align 8 +; CHECK-NEXT: [[DOUBLE_VAL:%.*]] = load double, ptr @.promoted_doubles.multiple_doubles_multi_bb, align 8 +; CHECK-NEXT: [[ADD_T:%.*]] = fadd double [[A]], [[DOUBLE_VAL]] +; CHECK-NEXT: [[MUL_T:%.*]] = fmul double [[ADD_T]], [[DOUBLE_VAL2]] +; CHECK-NEXT: [[SUB_T:%.*]] = fsub double [[MUL_T]], [[DOUBLE_VAL]] +; CHECK-NEXT: br label %[[IF_END:.*]] +; CHECK: [[IF_FALSE]]: +; CHECK-NEXT: [[DOUBLE_VAL3:%.*]] = load double, ptr getelementptr inbounds ([2 x double], ptr @.promoted_doubles.multiple_doubles_multi_bb, i64 0, i64 1), align 8 +; CHECK-NEXT: [[DOUBLE_VAL1:%.*]] = load double, ptr @.promoted_doubles.multiple_doubles_multi_bb, align 8 +; CHECK-NEXT: [[ADD_F:%.*]] = fadd double [[A]], [[DOUBLE_VAL1]] +; CHECK-NEXT: [[MUL_F:%.*]] = fmul double [[ADD_F]], [[DOUBLE_VAL3]] +; CHECK-NEXT: [[SUB_F:%.*]] = fsub double [[MUL_F]], [[DOUBLE_VAL1]] +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[PHI_RES:%.*]] = phi double [ [[SUB_T]], %[[IF_TRUE]] ], [ [[SUB_F]], %[[IF_FALSE]] ] +; CHECK-NEXT: ret double [[PHI_RES]] +; +entry: + br i1 %cond, label %if.true, label %if.false + +if.true: + %add.t = fadd double %a, 1.23 + %mul.t = fmul double %add.t, 4.56 + %sub.t = fsub double %mul.t, 1.23 + br label %if.end + +if.false: + %add.f = fadd double %a, 1.23 + %mul.f = fmul double %add.f, 4.56 + %sub.f = fsub 
double %mul.f, 1.23 + br label %if.end + +if.end: + %phi.res = phi double [ %sub.t, %if.true ], [ %sub.f, %if.false ] + ret double %phi.res +} + +; Check the insertion point in the case we have a phi taking a constant C and +; the source block also uses that same constant. +define double @multiple_doubles_phi(double %a, i1 %cond) { +; CHECK-LABEL: define double @multiple_doubles_phi( +; CHECK-SAME: double [[A:%.*]], i1 [[COND:%.*]]) #[[ATTR0]] { +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[IF_END:.*]] +; CHECK: [[IF_THEN]]: +; CHECK-NEXT: [[DOUBLE_VAL:%.*]] = load double, ptr @.promoted_doubles.multiple_doubles_phi, align 8 +; CHECK-NEXT: [[MUL:%.*]] = fmul double [[A]], [[DOUBLE_VAL]] +; CHECK-NEXT: br label %[[IF_END]] +; CHECK: [[IF_END]]: +; CHECK-NEXT: [[PHI_VAL:%.*]] = phi double [ [[DOUBLE_VAL]], %[[IF_THEN]] ], [ [[A]], %[[ENTRY]] ] +; CHECK-NEXT: [[DOUBLE_VAL1:%.*]] = load double, ptr getelementptr inbounds ([2 x double], ptr @.promoted_doubles.multiple_doubles_phi, i64 0, i64 1), align 8 +; CHECK-NEXT: [[RES:%.*]] = fadd double [[PHI_VAL]], [[DOUBLE_VAL1]] +; CHECK-NEXT: ret double [[RES]] +; +entry: + br i1 %cond, label %if.then, label %if.end + +if.then: + %mul = fmul double %a, 1.23 + br label %if.end + +if.end: + %phi.val = phi double [ 1.23, %if.then ], [ %a, %entry ] + %res = fadd double %phi.val, 4.56 + ret double %res +} diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll index c6d35172f26d2..d68ebefdec520 100644 --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -2945,24 +2945,24 @@ define void @va5_aligned_stack_caller() nounwind { ; RV64D-LP64F: # %bb.0: ; RV64D-LP64F-NEXT: addi sp, sp, -48 ; RV64D-LP64F-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: lui a0, %hi(.promoted_doubles.va5_aligned_stack_caller) -; RV64D-LP64F-NEXT: fld fa5, %lo(.promoted_doubles.va5_aligned_stack_caller)(a0) ; RV64D-LP64F-NEXT: li t0, 17 ; RV64D-LP64F-NEXT: li t1, 16 ; RV64D-LP64F-NEXT: li t2, 15 ; RV64D-LP64F-NEXT: lui a2, %hi(.LCPI11_0) ; RV64D-LP64F-NEXT: lui a3, %hi(.LCPI11_1) -; RV64D-LP64F-NEXT: lui a6, 2384 +; RV64D-LP64F-NEXT: lui a6, %hi(.LCPI11_2) +; RV64D-LP64F-NEXT: lui t3, 2384 ; RV64D-LP64F-NEXT: li a0, 1 ; RV64D-LP64F-NEXT: li a1, 11 ; RV64D-LP64F-NEXT: li a4, 12 ; RV64D-LP64F-NEXT: li a5, 13 ; RV64D-LP64F-NEXT: li a7, 14 -; RV64D-LP64F-NEXT: ld a2, %lo(.LCPI11_0)(a2) -; RV64D-LP64F-NEXT: ld a3, %lo(.LCPI11_1)(a3) -; RV64D-LP64F-NEXT: addi a6, a6, 761 +; RV64D-LP64F-NEXT: ld t4, %lo(.LCPI11_0)(a2) +; RV64D-LP64F-NEXT: ld a2, %lo(.LCPI11_1)(a3) +; RV64D-LP64F-NEXT: ld a3, %lo(.LCPI11_2)(a6) +; RV64D-LP64F-NEXT: addi a6, t3, 761 ; RV64D-LP64F-NEXT: slli a6, a6, 11 -; RV64D-LP64F-NEXT: fsd fa5, 0(sp) +; RV64D-LP64F-NEXT: sd t4, 0(sp) ; RV64D-LP64F-NEXT: sd t2, 8(sp) ; RV64D-LP64F-NEXT: sd t1, 16(sp) ; RV64D-LP64F-NEXT: sd t0, 24(sp) @@ -2975,24 +2975,24 @@ define void @va5_aligned_stack_caller() nounwind { ; RV64D-LP64D: # %bb.0: ; RV64D-LP64D-NEXT: addi sp, sp, -48 ; RV64D-LP64D-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: lui a0, %hi(.promoted_doubles.va5_aligned_stack_caller) -; RV64D-LP64D-NEXT: fld fa5, %lo(.promoted_doubles.va5_aligned_stack_caller)(a0) ; RV64D-LP64D-NEXT: li t0, 17 ; RV64D-LP64D-NEXT: li t1, 16 ; RV64D-LP64D-NEXT: li t2, 15 ; RV64D-LP64D-NEXT: lui a2, %hi(.LCPI11_0) ; RV64D-LP64D-NEXT: lui a3, %hi(.LCPI11_1) -; RV64D-LP64D-NEXT: lui a6, 2384 +; RV64D-LP64D-NEXT: lui a6, %hi(.LCPI11_2) +; RV64D-LP64D-NEXT: lui t3, 
2384
; RV64D-LP64D-NEXT:    li a0, 1
 ; RV64D-LP64D-NEXT:    li a1, 11
 ; RV64D-LP64D-NEXT:    li a4, 12
 ; RV64D-LP64D-NEXT:    li a5, 13
 ; RV64D-LP64D-NEXT:    li a7, 14
-; RV64D-LP64D-NEXT:    ld a2, %lo(.LCPI11_0)(a2)
-; RV64D-LP64D-NEXT:    ld a3, %lo(.LCPI11_1)(a3)
-; RV64D-LP64D-NEXT:    addi a6, a6, 761
+; RV64D-LP64D-NEXT:    ld t4, %lo(.LCPI11_0)(a2)
+; RV64D-LP64D-NEXT:    ld a2, %lo(.LCPI11_1)(a3)
+; RV64D-LP64D-NEXT:    ld a3, %lo(.LCPI11_2)(a6)
+; RV64D-LP64D-NEXT:    addi a6, t3, 761
 ; RV64D-LP64D-NEXT:    slli a6, a6, 11
-; RV64D-LP64D-NEXT:    fsd fa5, 0(sp)
+; RV64D-LP64D-NEXT:    sd t4, 0(sp)
 ; RV64D-LP64D-NEXT:    sd t2, 8(sp)
 ; RV64D-LP64D-NEXT:    sd t1, 16(sp)
 ; RV64D-LP64D-NEXT:    sd t0, 24(sp)
diff --git a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
index 567db1c85c594..c561b6ddb1add 100644
--- a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
+++ b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll
@@ -203,8 +203,8 @@ define void @foo6(ptr %p, double %d) nounwind {
 ;
 ; RV64ZDINX-LABEL: foo6:
 ; RV64ZDINX:       # %bb.0: # %entry
-; RV64ZDINX-NEXT:    lui a2, %hi(.promoted_doubles.foo6)
-; RV64ZDINX-NEXT:    ld a2, %lo(.promoted_doubles.foo6)(a2)
+; RV64ZDINX-NEXT:    lui a2, %hi(.LCPI5_0)
+; RV64ZDINX-NEXT:    ld a2, %lo(.LCPI5_0)(a2)
 ; RV64ZDINX-NEXT:    fadd.d a1, a1, a2
 ; RV64ZDINX-NEXT:    sd a1, 2044(a0)
 ; RV64ZDINX-NEXT:    ret

From 431e132132b9837ab61d6b6705948615dfb82c6e Mon Sep 17 00:00:00 2001
From: Alex Bradbury
Date: Wed, 8 Oct 2025 15:22:31 +0100
Subject: [PATCH 4/5] Revert unneeded test change and address review comments pt1

---
 llvm/lib/Target/RISCV/RISCV.h                 |    2 +-
 .../lib/Target/RISCV/RISCVPromoteConstant.cpp |   18 +-
 llvm/test/CodeGen/RISCV/vararg.ll             | 1144 ++++------------
 3 files changed, 290 insertions(+), 874 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index d43b192d9e5c7..51e8e8574ed15 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -19,8 +19,8 @@
 namespace llvm {
 class FunctionPass;
-class ModulePass;
 class InstructionSelector;
+class ModulePass;
 class PassRegistry;
 class RISCVRegisterBankInfo;
 class RISCVSubtarget;
diff --git a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
index 561269c33d256..89513a4c2e7ae 100644
--- a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
+++ b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp
@@ -55,11 +55,11 @@ class RISCVPromoteConstant : public ModulePass {
   /// Iterate over the functions and promote the double fp constants that
   /// would otherwise go into the constant pool to a constant array.
   bool runOnModule(Module &M) override {
+    if (skipModule(M))
+      return false;
     // TargetMachine and Subtarget are needed to query isFPImmLegal.
     const TargetPassConfig &TPC = getAnalysis<TargetPassConfig>();
     const TargetMachine &TM = TPC.getTM<TargetMachine>();
-    if (skipModule(M))
-      return false;
     bool Changed = false;
     for (Function &F : M) {
       const RISCVSubtarget &ST = TM.getSubtarget<RISCVSubtarget>(F);
@@ -154,14 +154,14 @@ bool RISCVPromoteConstant::runOnFunction(Function &F,
       BasicBlock *InsertionBB;
       BasicBlock::iterator InsertionPt;
 
-      if (auto *PN = dyn_cast<PHINode>(UserInst)) {
-        // If the user is a PHI node, we must insert the load in the
-        // corresponding predecessor basic block.
-        unsigned OperandIdx = U->getOperandNo();
-        InsertionBB = PN->getIncomingBlock(OperandIdx);
-      } else {
+      // If the user is a PHI node, we must insert the load in the
+      // corresponding predecessor basic block. Otherwise, it's inserted into
+      // the same block as the use.
+      if (auto *PN = dyn_cast<PHINode>(UserInst))
+        InsertionBB = PN->getIncomingBlock(*U);
+      else
         InsertionBB = UserInst->getParent();
-      }
+
       // It is always safe to insert in the first insertion point in the BB,
       // so do that and let other passes reorder.
       InsertionPt = InsertionBB->getFirstInsertionPt();
diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll
index d68ebefdec520..3dd99f3d49d2d 100644
--- a/llvm/test/CodeGen/RISCV/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/vararg.ll
@@ -18,13 +18,13 @@
 ; RUN:   -verify-machineinstrs \
 ; RUN:   | FileCheck -check-prefix=ILP32E-WITHFP %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs \
-; RUN:   | FileCheck -check-prefixes=RV64,LP64-LP64F-LP64D-FPELIM %s
+; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64f \
 ; RUN:   -verify-machineinstrs \
-; RUN:   | FileCheck -check-prefixes=RV64D-LP64F,LP64-LP64F-LP64D-FPELIM %s
+; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d -target-abi lp64d \
 ; RUN:   -verify-machineinstrs \
-; RUN:   | FileCheck -check-prefixes=RV64D-LP64D,LP64-LP64F-LP64D-FPELIM %s
+; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-FPELIM %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -verify-machineinstrs -frame-pointer=all \
 ; RUN:   | FileCheck -check-prefix=LP64-LP64F-LP64D-WITHFP %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -target-abi lp64e \
@@ -158,59 +158,23 @@ define i32 @va1(ptr %fmt, ...) {
 ; ILP32E-WITHFP-NEXT:    .cfi_def_cfa_offset 0
 ; ILP32E-WITHFP-NEXT:    ret
 ;
-; RV64-LABEL: va1:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -80
-; RV64-NEXT:    .cfi_def_cfa_offset 80
-; RV64-NEXT:    sd a1, 24(sp)
-; RV64-NEXT:    addi a0, sp, 28
-; RV64-NEXT:    sd a0, 8(sp)
-; RV64-NEXT:    lw a0, 24(sp)
-; RV64-NEXT:    sd a5, 56(sp)
-; RV64-NEXT:    sd a6, 64(sp)
-; RV64-NEXT:    sd a7, 72(sp)
-; RV64-NEXT:    sd a2, 32(sp)
-; RV64-NEXT:    sd a3, 40(sp)
-; RV64-NEXT:    sd a4, 48(sp)
-; RV64-NEXT:    addi sp, sp, 80
-; RV64-NEXT:    .cfi_def_cfa_offset 0
-; RV64-NEXT:    ret
-;
-; RV64D-LP64F-LABEL: va1:
-; RV64D-LP64F:       # %bb.0:
-; RV64D-LP64F-NEXT:    addi sp, sp, -80
-; RV64D-LP64F-NEXT:    .cfi_def_cfa_offset 80
-; RV64D-LP64F-NEXT:    sd a1, 24(sp)
-; RV64D-LP64F-NEXT:    addi a0, sp, 28
-; RV64D-LP64F-NEXT:    sd a0, 8(sp)
-; RV64D-LP64F-NEXT:    lw a0, 24(sp)
-; RV64D-LP64F-NEXT:    sd a5, 56(sp)
-; RV64D-LP64F-NEXT:    sd a6, 64(sp)
-; RV64D-LP64F-NEXT:    sd a7, 72(sp)
-; RV64D-LP64F-NEXT:    sd a2, 32(sp)
-; RV64D-LP64F-NEXT:    sd a3, 40(sp)
-; RV64D-LP64F-NEXT:    sd a4, 48(sp)
-; RV64D-LP64F-NEXT:    addi sp, sp, 80
-; RV64D-LP64F-NEXT:    .cfi_def_cfa_offset 0
-; RV64D-LP64F-NEXT:    ret
-;
-; RV64D-LP64D-LABEL: va1:
-; RV64D-LP64D:       # %bb.0:
-; RV64D-LP64D-NEXT:    addi sp, sp, -80
-; RV64D-LP64D-NEXT:    .cfi_def_cfa_offset 80
-; RV64D-LP64D-NEXT:    sd a1, 24(sp)
-; RV64D-LP64D-NEXT:    addi a0, sp, 28
-; RV64D-LP64D-NEXT:    sd a0, 8(sp)
-; RV64D-LP64D-NEXT:    lw a0, 24(sp)
-; RV64D-LP64D-NEXT:    sd a5, 56(sp)
-; RV64D-LP64D-NEXT:    sd a6, 64(sp)
-; RV64D-LP64D-NEXT:    sd a7, 72(sp)
-; RV64D-LP64D-NEXT:    sd a2, 32(sp)
-; RV64D-LP64D-NEXT:    sd a3, 40(sp)
-; RV64D-LP64D-NEXT:    sd a4, 48(sp)
-; RV64D-LP64D-NEXT:    addi sp, sp, 80
-; RV64D-LP64D-NEXT:    .cfi_def_cfa_offset 0
-; RV64D-LP64D-NEXT:    ret
+; LP64-LP64F-LP64D-FPELIM-LABEL: va1:
+; LP64-LP64F-LP64D-FPELIM:       # %bb.0:
+; LP64-LP64F-LP64D-FPELIM-NEXT:    addi sp, sp, -80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    .cfi_def_cfa_offset 80
+; LP64-LP64F-LP64D-FPELIM-NEXT:    sd a1, 24(sp)
+; 
LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 28 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 +; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -380,53 +344,21 @@ define i32 @va1_va_arg(ptr %fmt, ...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va1_va_arg: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -80 -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: sd a5, 56(sp) -; RV64-NEXT: sd a6, 64(sp) -; RV64-NEXT: sd a7, 72(sp) -; RV64-NEXT: sd a1, 24(sp) -; RV64-NEXT: sd a2, 32(sp) -; RV64-NEXT: sd a3, 40(sp) -; RV64-NEXT: sd a4, 48(sp) -; RV64-NEXT: addi a1, sp, 32 -; RV64-NEXT: sd a1, 8(sp) -; RV64-NEXT: addi sp, sp, 80 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va1_va_arg: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -80 -; RV64D-LP64F-NEXT: mv a0, a1 -; RV64D-LP64F-NEXT: sd a5, 56(sp) -; RV64D-LP64F-NEXT: sd a6, 64(sp) -; RV64D-LP64F-NEXT: sd a7, 72(sp) -; RV64D-LP64F-NEXT: sd a1, 24(sp) -; RV64D-LP64F-NEXT: sd a2, 32(sp) -; RV64D-LP64F-NEXT: sd a3, 40(sp) -; RV64D-LP64F-NEXT: sd a4, 48(sp) -; RV64D-LP64F-NEXT: addi a1, sp, 32 -; RV64D-LP64F-NEXT: sd a1, 8(sp) -; RV64D-LP64F-NEXT: addi sp, sp, 80 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va1_va_arg: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -80 -; RV64D-LP64D-NEXT: mv a0, a1 -; RV64D-LP64D-NEXT: sd a5, 56(sp) -; RV64D-LP64D-NEXT: sd a6, 64(sp) -; RV64D-LP64D-NEXT: sd a7, 72(sp) -; RV64D-LP64D-NEXT: sd a1, 24(sp) -; RV64D-LP64D-NEXT: sd a2, 32(sp) -; RV64D-LP64D-NEXT: sd a3, 40(sp) -; RV64D-LP64D-NEXT: sd a4, 48(sp) -; RV64D-LP64D-NEXT: addi a1, sp, 32 -; RV64D-LP64D-NEXT: sd a1, 8(sp) -; RV64D-LP64D-NEXT: addi sp, sp, 80 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va1_va_arg: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 +; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 32 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -637,101 +569,37 @@ define i32 @va1_va_arg_alloca(ptr %fmt, ...) 
nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 40 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va1_va_arg_alloca: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -96 -; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; RV64-NEXT: addi s0, sp, 32 -; RV64-NEXT: mv s1, a1 -; RV64-NEXT: sd a5, 40(s0) -; RV64-NEXT: sd a6, 48(s0) -; RV64-NEXT: sd a7, 56(s0) -; RV64-NEXT: sd a1, 8(s0) -; RV64-NEXT: sd a2, 16(s0) -; RV64-NEXT: sd a3, 24(s0) -; RV64-NEXT: sd a4, 32(s0) -; RV64-NEXT: addi a0, s0, 16 -; RV64-NEXT: sd a0, -32(s0) -; RV64-NEXT: slli a0, a1, 32 -; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: addi a0, a0, 15 -; RV64-NEXT: andi a0, a0, -16 -; RV64-NEXT: sub a0, sp, a0 -; RV64-NEXT: mv sp, a0 -; RV64-NEXT: call notdead -; RV64-NEXT: mv a0, s1 -; RV64-NEXT: addi sp, s0, -32 -; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 96 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va1_va_arg_alloca: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -96 -; RV64D-LP64F-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: addi s0, sp, 32 -; RV64D-LP64F-NEXT: mv s1, a1 -; RV64D-LP64F-NEXT: sd a5, 40(s0) -; RV64D-LP64F-NEXT: sd a6, 48(s0) -; RV64D-LP64F-NEXT: sd a7, 56(s0) -; RV64D-LP64F-NEXT: sd a1, 8(s0) -; RV64D-LP64F-NEXT: sd a2, 16(s0) -; RV64D-LP64F-NEXT: sd a3, 24(s0) -; RV64D-LP64F-NEXT: sd a4, 32(s0) -; RV64D-LP64F-NEXT: addi a0, s0, 16 -; RV64D-LP64F-NEXT: sd a0, -32(s0) -; RV64D-LP64F-NEXT: slli a0, a1, 32 -; RV64D-LP64F-NEXT: srli a0, a0, 32 -; RV64D-LP64F-NEXT: addi a0, a0, 15 -; RV64D-LP64F-NEXT: andi a0, a0, -16 -; RV64D-LP64F-NEXT: sub a0, sp, a0 -; RV64D-LP64F-NEXT: mv sp, a0 -; RV64D-LP64F-NEXT: call notdead -; RV64D-LP64F-NEXT: mv a0, s1 -; RV64D-LP64F-NEXT: addi sp, s0, -32 -; RV64D-LP64F-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: ld s1, 8(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: addi sp, sp, 96 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va1_va_arg_alloca: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -96 -; RV64D-LP64D-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: sd s1, 8(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: addi s0, sp, 32 -; RV64D-LP64D-NEXT: mv s1, a1 -; RV64D-LP64D-NEXT: sd a5, 40(s0) -; RV64D-LP64D-NEXT: sd a6, 48(s0) -; RV64D-LP64D-NEXT: sd a7, 56(s0) -; RV64D-LP64D-NEXT: sd a1, 8(s0) -; RV64D-LP64D-NEXT: sd a2, 16(s0) -; RV64D-LP64D-NEXT: sd a3, 24(s0) -; RV64D-LP64D-NEXT: sd a4, 32(s0) -; RV64D-LP64D-NEXT: addi a0, s0, 16 -; RV64D-LP64D-NEXT: sd a0, -32(s0) -; RV64D-LP64D-NEXT: slli a0, a1, 32 -; RV64D-LP64D-NEXT: srli a0, a0, 32 -; RV64D-LP64D-NEXT: addi a0, a0, 15 -; RV64D-LP64D-NEXT: andi a0, a0, -16 -; RV64D-LP64D-NEXT: sub a0, sp, a0 -; RV64D-LP64D-NEXT: mv sp, a0 -; RV64D-LP64D-NEXT: call notdead -; RV64D-LP64D-NEXT: mv a0, s1 -; RV64D-LP64D-NEXT: addi sp, s0, -32 -; RV64D-LP64D-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: ld s1, 8(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: addi sp, sp, 96 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: 
va1_va_arg_alloca: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -96 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: addi s0, sp, 32 +; LP64-LP64F-LP64D-FPELIM-NEXT: mv s1, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, s0, 16 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, -32(s0) +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a1, 32 +; LP64-LP64F-LP64D-FPELIM-NEXT: srli a0, a0, 32 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 15 +; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -16 +; LP64-LP64F-LP64D-FPELIM-NEXT: sub a0, sp, a0 +; LP64-LP64F-LP64D-FPELIM-NEXT: mv sp, a0 +; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead +; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, s1 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, s0, -32 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 96 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_va_arg_alloca: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -901,41 +769,17 @@ define void @va1_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 8 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va1_caller: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -16 -; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-NEXT: li a1, 1023 -; RV64-NEXT: slli a1, a1, 52 -; RV64-NEXT: li a2, 2 -; RV64-NEXT: call va1 -; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 16 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va1_caller: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -16 -; RV64D-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: li a1, 1023 -; RV64D-LP64F-NEXT: slli a1, a1, 52 -; RV64D-LP64F-NEXT: li a2, 2 -; RV64D-LP64F-NEXT: call va1 -; RV64D-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: addi sp, sp, 16 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va1_caller: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -16 -; RV64D-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: li a1, 1023 -; RV64D-LP64D-NEXT: slli a1, a1, 52 -; RV64D-LP64D-NEXT: li a2, 2 -; RV64D-LP64D-NEXT: call va1 -; RV64D-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: addi sp, sp, 16 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va1_caller: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 2 +; LP64-LP64F-LP64D-FPELIM-NEXT: call va1 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va1_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1097,53 +941,21 @@ define i64 
@va2(ptr %fmt, ...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va2: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -80 -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: sd a5, 56(sp) -; RV64-NEXT: sd a6, 64(sp) -; RV64-NEXT: sd a7, 72(sp) -; RV64-NEXT: sd a1, 24(sp) -; RV64-NEXT: sd a2, 32(sp) -; RV64-NEXT: sd a3, 40(sp) -; RV64-NEXT: sd a4, 48(sp) -; RV64-NEXT: addi a1, sp, 39 -; RV64-NEXT: sd a1, 8(sp) -; RV64-NEXT: addi sp, sp, 80 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va2: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -80 -; RV64D-LP64F-NEXT: mv a0, a1 -; RV64D-LP64F-NEXT: sd a5, 56(sp) -; RV64D-LP64F-NEXT: sd a6, 64(sp) -; RV64D-LP64F-NEXT: sd a7, 72(sp) -; RV64D-LP64F-NEXT: sd a1, 24(sp) -; RV64D-LP64F-NEXT: sd a2, 32(sp) -; RV64D-LP64F-NEXT: sd a3, 40(sp) -; RV64D-LP64F-NEXT: sd a4, 48(sp) -; RV64D-LP64F-NEXT: addi a1, sp, 39 -; RV64D-LP64F-NEXT: sd a1, 8(sp) -; RV64D-LP64F-NEXT: addi sp, sp, 80 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va2: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -80 -; RV64D-LP64D-NEXT: mv a0, a1 -; RV64D-LP64D-NEXT: sd a5, 56(sp) -; RV64D-LP64D-NEXT: sd a6, 64(sp) -; RV64D-LP64D-NEXT: sd a7, 72(sp) -; RV64D-LP64D-NEXT: sd a1, 24(sp) -; RV64D-LP64D-NEXT: sd a2, 32(sp) -; RV64D-LP64D-NEXT: sd a3, 40(sp) -; RV64D-LP64D-NEXT: sd a4, 48(sp) -; RV64D-LP64D-NEXT: addi a1, sp, 39 -; RV64D-LP64D-NEXT: sd a1, 8(sp) -; RV64D-LP64D-NEXT: addi sp, sp, 80 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va2: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 +; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 39 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va2: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1326,53 +1138,21 @@ define i64 @va2_va_arg(ptr %fmt, ...) 
nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va2_va_arg: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -80 -; RV64-NEXT: mv a0, a1 -; RV64-NEXT: sd a5, 56(sp) -; RV64-NEXT: sd a6, 64(sp) -; RV64-NEXT: sd a7, 72(sp) -; RV64-NEXT: sd a1, 24(sp) -; RV64-NEXT: sd a2, 32(sp) -; RV64-NEXT: sd a3, 40(sp) -; RV64-NEXT: sd a4, 48(sp) -; RV64-NEXT: addi a1, sp, 32 -; RV64-NEXT: sd a1, 8(sp) -; RV64-NEXT: addi sp, sp, 80 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va2_va_arg: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -80 -; RV64D-LP64F-NEXT: mv a0, a1 -; RV64D-LP64F-NEXT: sd a5, 56(sp) -; RV64D-LP64F-NEXT: sd a6, 64(sp) -; RV64D-LP64F-NEXT: sd a7, 72(sp) -; RV64D-LP64F-NEXT: sd a1, 24(sp) -; RV64D-LP64F-NEXT: sd a2, 32(sp) -; RV64D-LP64F-NEXT: sd a3, 40(sp) -; RV64D-LP64F-NEXT: sd a4, 48(sp) -; RV64D-LP64F-NEXT: addi a1, sp, 32 -; RV64D-LP64F-NEXT: sd a1, 8(sp) -; RV64D-LP64F-NEXT: addi sp, sp, 80 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va2_va_arg: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -80 -; RV64D-LP64D-NEXT: mv a0, a1 -; RV64D-LP64D-NEXT: sd a5, 56(sp) -; RV64D-LP64D-NEXT: sd a6, 64(sp) -; RV64D-LP64D-NEXT: sd a7, 72(sp) -; RV64D-LP64D-NEXT: sd a1, 24(sp) -; RV64D-LP64D-NEXT: sd a2, 32(sp) -; RV64D-LP64D-NEXT: sd a3, 40(sp) -; RV64D-LP64D-NEXT: sd a4, 48(sp) -; RV64D-LP64D-NEXT: addi a1, sp, 32 -; RV64D-LP64D-NEXT: sd a1, 8(sp) -; RV64D-LP64D-NEXT: addi sp, sp, 80 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va2_va_arg: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 +; LP64-LP64F-LP64D-FPELIM-NEXT: mv a0, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 32 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va2_va_arg: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1497,38 +1277,16 @@ define void @va2_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 8 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va2_caller: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -16 -; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-NEXT: li a1, 1023 -; RV64-NEXT: slli a1, a1, 52 -; RV64-NEXT: call va2 -; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 16 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va2_caller: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -16 -; RV64D-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: li a1, 1023 -; RV64D-LP64F-NEXT: slli a1, a1, 52 -; RV64D-LP64F-NEXT: call va2 -; RV64D-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: addi sp, sp, 16 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va2_caller: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -16 -; RV64D-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: li a1, 1023 -; RV64D-LP64D-NEXT: slli a1, a1, 52 -; RV64D-LP64D-NEXT: call va2 -; RV64D-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: addi sp, sp, 16 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va2_caller: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; 
LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52 +; LP64-LP64F-LP64D-FPELIM-NEXT: call va2 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va2_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1697,50 +1455,20 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 28 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va3: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -64 -; RV64-NEXT: sd a6, 48(sp) -; RV64-NEXT: sd a7, 56(sp) -; RV64-NEXT: sd a2, 16(sp) -; RV64-NEXT: sd a3, 24(sp) -; RV64-NEXT: sd a4, 32(sp) -; RV64-NEXT: sd a5, 40(sp) -; RV64-NEXT: addi a3, sp, 31 -; RV64-NEXT: add a0, a1, a2 -; RV64-NEXT: sd a3, 8(sp) -; RV64-NEXT: addi sp, sp, 64 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va3: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -64 -; RV64D-LP64F-NEXT: sd a6, 48(sp) -; RV64D-LP64F-NEXT: sd a7, 56(sp) -; RV64D-LP64F-NEXT: sd a2, 16(sp) -; RV64D-LP64F-NEXT: sd a3, 24(sp) -; RV64D-LP64F-NEXT: sd a4, 32(sp) -; RV64D-LP64F-NEXT: sd a5, 40(sp) -; RV64D-LP64F-NEXT: addi a3, sp, 31 -; RV64D-LP64F-NEXT: add a0, a1, a2 -; RV64D-LP64F-NEXT: sd a3, 8(sp) -; RV64D-LP64F-NEXT: addi sp, sp, 64 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va3: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -64 -; RV64D-LP64D-NEXT: sd a6, 48(sp) -; RV64D-LP64D-NEXT: sd a7, 56(sp) -; RV64D-LP64D-NEXT: sd a2, 16(sp) -; RV64D-LP64D-NEXT: sd a3, 24(sp) -; RV64D-LP64D-NEXT: sd a4, 32(sp) -; RV64D-LP64D-NEXT: sd a5, 40(sp) -; RV64D-LP64D-NEXT: addi a3, sp, 31 -; RV64D-LP64D-NEXT: add a0, a1, a2 -; RV64D-LP64D-NEXT: sd a3, 8(sp) -; RV64D-LP64D-NEXT: addi sp, sp, 64 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va3: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -64 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, sp, 31 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va3: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -1931,50 +1659,20 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) 
nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 28 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va3_va_arg: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -64 -; RV64-NEXT: sd a6, 48(sp) -; RV64-NEXT: sd a7, 56(sp) -; RV64-NEXT: sd a2, 16(sp) -; RV64-NEXT: sd a3, 24(sp) -; RV64-NEXT: sd a4, 32(sp) -; RV64-NEXT: sd a5, 40(sp) -; RV64-NEXT: addi a3, sp, 24 -; RV64-NEXT: add a0, a1, a2 -; RV64-NEXT: sd a3, 8(sp) -; RV64-NEXT: addi sp, sp, 64 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va3_va_arg: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -64 -; RV64D-LP64F-NEXT: sd a6, 48(sp) -; RV64D-LP64F-NEXT: sd a7, 56(sp) -; RV64D-LP64F-NEXT: sd a2, 16(sp) -; RV64D-LP64F-NEXT: sd a3, 24(sp) -; RV64D-LP64F-NEXT: sd a4, 32(sp) -; RV64D-LP64F-NEXT: sd a5, 40(sp) -; RV64D-LP64F-NEXT: addi a3, sp, 24 -; RV64D-LP64F-NEXT: add a0, a1, a2 -; RV64D-LP64F-NEXT: sd a3, 8(sp) -; RV64D-LP64F-NEXT: addi sp, sp, 64 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va3_va_arg: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -64 -; RV64D-LP64D-NEXT: sd a6, 48(sp) -; RV64D-LP64D-NEXT: sd a7, 56(sp) -; RV64D-LP64D-NEXT: sd a2, 16(sp) -; RV64D-LP64D-NEXT: sd a3, 24(sp) -; RV64D-LP64D-NEXT: sd a4, 32(sp) -; RV64D-LP64D-NEXT: sd a5, 40(sp) -; RV64D-LP64D-NEXT: addi a3, sp, 24 -; RV64D-LP64D-NEXT: add a0, a1, a2 -; RV64D-LP64D-NEXT: sd a3, 8(sp) -; RV64D-LP64D-NEXT: addi sp, sp, 64 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va3_va_arg: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -64 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, sp, 24 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va3_va_arg: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -2112,44 +1810,18 @@ define void @va3_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 8 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va3_caller: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -16 -; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64-NEXT: li a2, 1 -; RV64-NEXT: li a0, 2 -; RV64-NEXT: slli a2, a2, 62 -; RV64-NEXT: li a1, 1111 -; RV64-NEXT: call va3 -; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 16 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va3_caller: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -16 -; RV64D-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: li a2, 1 -; RV64D-LP64F-NEXT: li a0, 2 -; RV64D-LP64F-NEXT: slli a2, a2, 62 -; RV64D-LP64F-NEXT: li a1, 1111 -; RV64D-LP64F-NEXT: call va3 -; RV64D-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: addi sp, sp, 16 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va3_caller: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -16 -; RV64D-LP64D-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: li a2, 1 -; RV64D-LP64D-NEXT: li a0, 2 -; RV64D-LP64D-NEXT: slli a2, a2, 62 -; RV64D-LP64D-NEXT: li a1, 1111 -; RV64D-LP64D-NEXT: call va3 -; RV64D-LP64D-NEXT: ld ra, 8(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: addi sp, sp, 16 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va3_caller: +; 
LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 2 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 62 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1111 +; LP64-LP64F-LP64D-FPELIM-NEXT: call va3 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va3_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -2409,128 +2081,46 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 44 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va4_va_copy: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -96 -; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64-NEXT: mv s0, a1 -; RV64-NEXT: sd a5, 72(sp) -; RV64-NEXT: sd a6, 80(sp) -; RV64-NEXT: sd a7, 88(sp) -; RV64-NEXT: sd a1, 40(sp) -; RV64-NEXT: sd a2, 48(sp) -; RV64-NEXT: sd a3, 56(sp) -; RV64-NEXT: sd a4, 64(sp) -; RV64-NEXT: addi a0, sp, 48 -; RV64-NEXT: sd a0, 8(sp) -; RV64-NEXT: sd a0, 0(sp) -; RV64-NEXT: call notdead -; RV64-NEXT: ld a0, 8(sp) -; RV64-NEXT: addi a0, a0, 3 -; RV64-NEXT: andi a0, a0, -4 -; RV64-NEXT: addi a1, a0, 8 -; RV64-NEXT: addi a2, a0, 11 -; RV64-NEXT: sd a1, 8(sp) -; RV64-NEXT: andi a2, a2, -4 -; RV64-NEXT: ld a0, 0(a0) -; RV64-NEXT: addi a1, a2, 8 -; RV64-NEXT: addi a3, a2, 11 -; RV64-NEXT: sd a1, 8(sp) -; RV64-NEXT: andi a3, a3, -4 -; RV64-NEXT: ld a1, 0(a2) -; RV64-NEXT: addi a2, a3, 8 -; RV64-NEXT: sd a2, 8(sp) -; RV64-NEXT: ld a2, 0(a3) -; RV64-NEXT: add a0, a0, s0 -; RV64-NEXT: add a0, a0, a1 -; RV64-NEXT: addw a0, a0, a2 -; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 96 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va4_va_copy: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -96 -; RV64D-LP64F-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: mv s0, a1 -; RV64D-LP64F-NEXT: sd a5, 72(sp) -; RV64D-LP64F-NEXT: sd a6, 80(sp) -; RV64D-LP64F-NEXT: sd a7, 88(sp) -; RV64D-LP64F-NEXT: sd a1, 40(sp) -; RV64D-LP64F-NEXT: sd a2, 48(sp) -; RV64D-LP64F-NEXT: sd a3, 56(sp) -; RV64D-LP64F-NEXT: sd a4, 64(sp) -; RV64D-LP64F-NEXT: addi a0, sp, 48 -; RV64D-LP64F-NEXT: sd a0, 8(sp) -; RV64D-LP64F-NEXT: sd a0, 0(sp) -; RV64D-LP64F-NEXT: call notdead -; RV64D-LP64F-NEXT: ld a0, 8(sp) -; RV64D-LP64F-NEXT: addi a0, a0, 3 -; RV64D-LP64F-NEXT: andi a0, a0, -4 -; RV64D-LP64F-NEXT: addi a1, a0, 8 -; RV64D-LP64F-NEXT: addi a2, a0, 11 -; RV64D-LP64F-NEXT: sd a1, 8(sp) -; RV64D-LP64F-NEXT: andi a2, a2, -4 -; RV64D-LP64F-NEXT: ld a0, 0(a0) -; RV64D-LP64F-NEXT: addi a1, a2, 8 -; RV64D-LP64F-NEXT: addi a3, a2, 11 -; RV64D-LP64F-NEXT: sd a1, 8(sp) -; RV64D-LP64F-NEXT: andi a3, a3, -4 -; RV64D-LP64F-NEXT: ld a1, 0(a2) -; RV64D-LP64F-NEXT: addi a2, a3, 8 -; RV64D-LP64F-NEXT: sd a2, 8(sp) -; RV64D-LP64F-NEXT: ld a2, 0(a3) -; RV64D-LP64F-NEXT: add a0, a0, s0 -; RV64D-LP64F-NEXT: add a0, a0, a1 -; RV64D-LP64F-NEXT: addw a0, a0, a2 -; RV64D-LP64F-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: addi sp, sp, 96 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va4_va_copy: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -96 -; 
RV64D-LP64D-NEXT: sd ra, 24(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: sd s0, 16(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: mv s0, a1 -; RV64D-LP64D-NEXT: sd a5, 72(sp) -; RV64D-LP64D-NEXT: sd a6, 80(sp) -; RV64D-LP64D-NEXT: sd a7, 88(sp) -; RV64D-LP64D-NEXT: sd a1, 40(sp) -; RV64D-LP64D-NEXT: sd a2, 48(sp) -; RV64D-LP64D-NEXT: sd a3, 56(sp) -; RV64D-LP64D-NEXT: sd a4, 64(sp) -; RV64D-LP64D-NEXT: addi a0, sp, 48 -; RV64D-LP64D-NEXT: sd a0, 8(sp) -; RV64D-LP64D-NEXT: sd a0, 0(sp) -; RV64D-LP64D-NEXT: call notdead -; RV64D-LP64D-NEXT: ld a0, 8(sp) -; RV64D-LP64D-NEXT: addi a0, a0, 3 -; RV64D-LP64D-NEXT: andi a0, a0, -4 -; RV64D-LP64D-NEXT: addi a1, a0, 8 -; RV64D-LP64D-NEXT: addi a2, a0, 11 -; RV64D-LP64D-NEXT: sd a1, 8(sp) -; RV64D-LP64D-NEXT: andi a2, a2, -4 -; RV64D-LP64D-NEXT: ld a0, 0(a0) -; RV64D-LP64D-NEXT: addi a1, a2, 8 -; RV64D-LP64D-NEXT: addi a3, a2, 11 -; RV64D-LP64D-NEXT: sd a1, 8(sp) -; RV64D-LP64D-NEXT: andi a3, a3, -4 -; RV64D-LP64D-NEXT: ld a1, 0(a2) -; RV64D-LP64D-NEXT: addi a2, a3, 8 -; RV64D-LP64D-NEXT: sd a2, 8(sp) -; RV64D-LP64D-NEXT: ld a2, 0(a3) -; RV64D-LP64D-NEXT: add a0, a0, s0 -; RV64D-LP64D-NEXT: add a0, a0, a1 -; RV64D-LP64D-NEXT: addw a0, a0, a2 -; RV64D-LP64D-NEXT: ld ra, 24(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: ld s0, 16(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: addi sp, sp, 96 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va4_va_copy: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -96 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: mv s0, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 72(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 80(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 88(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 64(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 48 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 0(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead +; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 3 +; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -4 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a0, 8 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a0, 11 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: andi a2, a2, -4 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 0(a0) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a2, 8 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a2, 11 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: andi a3, a3, -4 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld a1, 0(a2) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a3, 8 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: ld a2, 0(a3) +; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a0, s0 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a0, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: addw a0, a0, a2 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 96 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va4_va_copy: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -2911,95 +2501,35 @@ define void @va5_aligned_stack_caller() nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 64 ; 
ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va5_aligned_stack_caller: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -48 -; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64-NEXT: li t0, 17 -; RV64-NEXT: li t1, 16 -; RV64-NEXT: li t2, 15 -; RV64-NEXT: lui a2, %hi(.LCPI11_0) -; RV64-NEXT: lui a3, %hi(.LCPI11_1) -; RV64-NEXT: lui a6, %hi(.LCPI11_2) -; RV64-NEXT: lui t3, 2384 -; RV64-NEXT: li a0, 1 -; RV64-NEXT: li a1, 11 -; RV64-NEXT: li a4, 12 -; RV64-NEXT: li a5, 13 -; RV64-NEXT: li a7, 14 -; RV64-NEXT: ld t4, %lo(.LCPI11_0)(a2) -; RV64-NEXT: ld a2, %lo(.LCPI11_1)(a3) -; RV64-NEXT: ld a3, %lo(.LCPI11_2)(a6) -; RV64-NEXT: addi a6, t3, 761 -; RV64-NEXT: slli a6, a6, 11 -; RV64-NEXT: sd t4, 0(sp) -; RV64-NEXT: sd t2, 8(sp) -; RV64-NEXT: sd t1, 16(sp) -; RV64-NEXT: sd t0, 24(sp) -; RV64-NEXT: call va5_aligned_stack_callee -; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; RV64-NEXT: addi sp, sp, 48 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va5_aligned_stack_caller: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -48 -; RV64D-LP64F-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64D-LP64F-NEXT: li t0, 17 -; RV64D-LP64F-NEXT: li t1, 16 -; RV64D-LP64F-NEXT: li t2, 15 -; RV64D-LP64F-NEXT: lui a2, %hi(.LCPI11_0) -; RV64D-LP64F-NEXT: lui a3, %hi(.LCPI11_1) -; RV64D-LP64F-NEXT: lui a6, %hi(.LCPI11_2) -; RV64D-LP64F-NEXT: lui t3, 2384 -; RV64D-LP64F-NEXT: li a0, 1 -; RV64D-LP64F-NEXT: li a1, 11 -; RV64D-LP64F-NEXT: li a4, 12 -; RV64D-LP64F-NEXT: li a5, 13 -; RV64D-LP64F-NEXT: li a7, 14 -; RV64D-LP64F-NEXT: ld t4, %lo(.LCPI11_0)(a2) -; RV64D-LP64F-NEXT: ld a2, %lo(.LCPI11_1)(a3) -; RV64D-LP64F-NEXT: ld a3, %lo(.LCPI11_2)(a6) -; RV64D-LP64F-NEXT: addi a6, t3, 761 -; RV64D-LP64F-NEXT: slli a6, a6, 11 -; RV64D-LP64F-NEXT: sd t4, 0(sp) -; RV64D-LP64F-NEXT: sd t2, 8(sp) -; RV64D-LP64F-NEXT: sd t1, 16(sp) -; RV64D-LP64F-NEXT: sd t0, 24(sp) -; RV64D-LP64F-NEXT: call va5_aligned_stack_callee -; RV64D-LP64F-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; RV64D-LP64F-NEXT: addi sp, sp, 48 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va5_aligned_stack_caller: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -48 -; RV64D-LP64D-NEXT: sd ra, 40(sp) # 8-byte Folded Spill -; RV64D-LP64D-NEXT: li t0, 17 -; RV64D-LP64D-NEXT: li t1, 16 -; RV64D-LP64D-NEXT: li t2, 15 -; RV64D-LP64D-NEXT: lui a2, %hi(.LCPI11_0) -; RV64D-LP64D-NEXT: lui a3, %hi(.LCPI11_1) -; RV64D-LP64D-NEXT: lui a6, %hi(.LCPI11_2) -; RV64D-LP64D-NEXT: lui t3, 2384 -; RV64D-LP64D-NEXT: li a0, 1 -; RV64D-LP64D-NEXT: li a1, 11 -; RV64D-LP64D-NEXT: li a4, 12 -; RV64D-LP64D-NEXT: li a5, 13 -; RV64D-LP64D-NEXT: li a7, 14 -; RV64D-LP64D-NEXT: ld t4, %lo(.LCPI11_0)(a2) -; RV64D-LP64D-NEXT: ld a2, %lo(.LCPI11_1)(a3) -; RV64D-LP64D-NEXT: ld a3, %lo(.LCPI11_2)(a6) -; RV64D-LP64D-NEXT: addi a6, t3, 761 -; RV64D-LP64D-NEXT: slli a6, a6, 11 -; RV64D-LP64D-NEXT: sd t4, 0(sp) -; RV64D-LP64D-NEXT: sd t2, 8(sp) -; RV64D-LP64D-NEXT: sd t1, 16(sp) -; RV64D-LP64D-NEXT: sd t0, 24(sp) -; RV64D-LP64D-NEXT: call va5_aligned_stack_callee -; RV64D-LP64D-NEXT: ld ra, 40(sp) # 8-byte Folded Reload -; RV64D-LP64D-NEXT: addi sp, sp, 48 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va5_aligned_stack_caller: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -48 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 40(sp) # 8-byte Folded Spill +; LP64-LP64F-LP64D-FPELIM-NEXT: li t0, 17 +; LP64-LP64F-LP64D-FPELIM-NEXT: li t1, 16 +; LP64-LP64F-LP64D-FPELIM-NEXT: li t2, 15 +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a2, %hi(.LCPI11_0) +; 
LP64-LP64F-LP64D-FPELIM-NEXT: lui a3, %hi(.LCPI11_1) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a6, %hi(.LCPI11_2) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui t3, 2384 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 11 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a4, 12 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a5, 13 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a7, 14 +; LP64-LP64F-LP64D-FPELIM-NEXT: ld t4, %lo(.LCPI11_0)(a2) +; LP64-LP64F-LP64D-FPELIM-NEXT: ld a2, %lo(.LCPI11_1)(a3) +; LP64-LP64F-LP64D-FPELIM-NEXT: ld a3, %lo(.LCPI11_2)(a6) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a6, t3, 761 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a6, a6, 11 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd t4, 0(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd t2, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd t1, 16(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd t0, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: call va5_aligned_stack_callee +; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 40(sp) # 8-byte Folded Reload +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 48 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va5_aligned_stack_caller: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -3197,53 +2727,21 @@ define i32 @va6_no_fixed_args(...) nounwind { ; ILP32E-WITHFP-NEXT: addi sp, sp, 36 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va6_no_fixed_args: -; RV64: # %bb.0: -; RV64-NEXT: addi sp, sp, -80 -; RV64-NEXT: sd a4, 48(sp) -; RV64-NEXT: sd a5, 56(sp) -; RV64-NEXT: sd a6, 64(sp) -; RV64-NEXT: sd a7, 72(sp) -; RV64-NEXT: sd a0, 16(sp) -; RV64-NEXT: sd a1, 24(sp) -; RV64-NEXT: sd a2, 32(sp) -; RV64-NEXT: sd a3, 40(sp) -; RV64-NEXT: addi a1, sp, 24 -; RV64-NEXT: sd a1, 8(sp) -; RV64-NEXT: addi sp, sp, 80 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va6_no_fixed_args: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: addi sp, sp, -80 -; RV64D-LP64F-NEXT: sd a4, 48(sp) -; RV64D-LP64F-NEXT: sd a5, 56(sp) -; RV64D-LP64F-NEXT: sd a6, 64(sp) -; RV64D-LP64F-NEXT: sd a7, 72(sp) -; RV64D-LP64F-NEXT: sd a0, 16(sp) -; RV64D-LP64F-NEXT: sd a1, 24(sp) -; RV64D-LP64F-NEXT: sd a2, 32(sp) -; RV64D-LP64F-NEXT: sd a3, 40(sp) -; RV64D-LP64F-NEXT: addi a1, sp, 24 -; RV64D-LP64F-NEXT: sd a1, 8(sp) -; RV64D-LP64F-NEXT: addi sp, sp, 80 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va6_no_fixed_args: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: addi sp, sp, -80 -; RV64D-LP64D-NEXT: sd a4, 48(sp) -; RV64D-LP64D-NEXT: sd a5, 56(sp) -; RV64D-LP64D-NEXT: sd a6, 64(sp) -; RV64D-LP64D-NEXT: sd a7, 72(sp) -; RV64D-LP64D-NEXT: sd a0, 16(sp) -; RV64D-LP64D-NEXT: sd a1, 24(sp) -; RV64D-LP64D-NEXT: sd a2, 32(sp) -; RV64D-LP64D-NEXT: sd a3, 40(sp) -; RV64D-LP64D-NEXT: addi a1, sp, 24 -; RV64D-LP64D-NEXT: sd a1, 8(sp) -; RV64D-LP64D-NEXT: addi sp, sp, 80 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va6_no_fixed_args: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 16(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 24 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va6_no_fixed_args: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -3488,125 +2986,45 @@ define 
i32 @va_large_stack(ptr %fmt, ...) { ; ILP32E-WITHFP-NEXT: .cfi_def_cfa_offset 0 ; ILP32E-WITHFP-NEXT: ret ; -; RV64-LABEL: va_large_stack: -; RV64: # %bb.0: -; RV64-NEXT: lui a0, 24414 -; RV64-NEXT: addi a0, a0, 336 -; RV64-NEXT: sub sp, sp, a0 -; RV64-NEXT: .cfi_def_cfa_offset 100000080 -; RV64-NEXT: lui a0, 24414 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: sd a1, 280(a0) -; RV64-NEXT: lui a0, 24414 -; RV64-NEXT: addi a0, a0, 284 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: sd a0, 8(sp) -; RV64-NEXT: lui a0, 24414 -; RV64-NEXT: add a0, sp, a0 -; RV64-NEXT: lw a0, 280(a0) -; RV64-NEXT: lui a1, 24414 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: sd a5, 312(a1) -; RV64-NEXT: lui a1, 24414 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: sd a6, 320(a1) -; RV64-NEXT: lui a1, 24414 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: sd a7, 328(a1) -; RV64-NEXT: lui a1, 24414 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: sd a2, 288(a1) -; RV64-NEXT: lui a1, 24414 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: sd a3, 296(a1) -; RV64-NEXT: lui a1, 24414 -; RV64-NEXT: add a1, sp, a1 -; RV64-NEXT: sd a4, 304(a1) -; RV64-NEXT: lui a1, 24414 -; RV64-NEXT: addi a1, a1, 336 -; RV64-NEXT: add sp, sp, a1 -; RV64-NEXT: .cfi_def_cfa_offset 0 -; RV64-NEXT: ret -; -; RV64D-LP64F-LABEL: va_large_stack: -; RV64D-LP64F: # %bb.0: -; RV64D-LP64F-NEXT: lui a0, 24414 -; RV64D-LP64F-NEXT: addi a0, a0, 336 -; RV64D-LP64F-NEXT: sub sp, sp, a0 -; RV64D-LP64F-NEXT: .cfi_def_cfa_offset 100000080 -; RV64D-LP64F-NEXT: lui a0, 24414 -; RV64D-LP64F-NEXT: add a0, sp, a0 -; RV64D-LP64F-NEXT: sd a1, 280(a0) -; RV64D-LP64F-NEXT: lui a0, 24414 -; RV64D-LP64F-NEXT: addi a0, a0, 284 -; RV64D-LP64F-NEXT: add a0, sp, a0 -; RV64D-LP64F-NEXT: sd a0, 8(sp) -; RV64D-LP64F-NEXT: lui a0, 24414 -; RV64D-LP64F-NEXT: add a0, sp, a0 -; RV64D-LP64F-NEXT: lw a0, 280(a0) -; RV64D-LP64F-NEXT: lui a1, 24414 -; RV64D-LP64F-NEXT: add a1, sp, a1 -; RV64D-LP64F-NEXT: sd a5, 312(a1) -; RV64D-LP64F-NEXT: lui a1, 24414 -; RV64D-LP64F-NEXT: add a1, sp, a1 -; RV64D-LP64F-NEXT: sd a6, 320(a1) -; RV64D-LP64F-NEXT: lui a1, 24414 -; RV64D-LP64F-NEXT: add a1, sp, a1 -; RV64D-LP64F-NEXT: sd a7, 328(a1) -; RV64D-LP64F-NEXT: lui a1, 24414 -; RV64D-LP64F-NEXT: add a1, sp, a1 -; RV64D-LP64F-NEXT: sd a2, 288(a1) -; RV64D-LP64F-NEXT: lui a1, 24414 -; RV64D-LP64F-NEXT: add a1, sp, a1 -; RV64D-LP64F-NEXT: sd a3, 296(a1) -; RV64D-LP64F-NEXT: lui a1, 24414 -; RV64D-LP64F-NEXT: add a1, sp, a1 -; RV64D-LP64F-NEXT: sd a4, 304(a1) -; RV64D-LP64F-NEXT: lui a1, 24414 -; RV64D-LP64F-NEXT: addi a1, a1, 336 -; RV64D-LP64F-NEXT: add sp, sp, a1 -; RV64D-LP64F-NEXT: .cfi_def_cfa_offset 0 -; RV64D-LP64F-NEXT: ret -; -; RV64D-LP64D-LABEL: va_large_stack: -; RV64D-LP64D: # %bb.0: -; RV64D-LP64D-NEXT: lui a0, 24414 -; RV64D-LP64D-NEXT: addi a0, a0, 336 -; RV64D-LP64D-NEXT: sub sp, sp, a0 -; RV64D-LP64D-NEXT: .cfi_def_cfa_offset 100000080 -; RV64D-LP64D-NEXT: lui a0, 24414 -; RV64D-LP64D-NEXT: add a0, sp, a0 -; RV64D-LP64D-NEXT: sd a1, 280(a0) -; RV64D-LP64D-NEXT: lui a0, 24414 -; RV64D-LP64D-NEXT: addi a0, a0, 284 -; RV64D-LP64D-NEXT: add a0, sp, a0 -; RV64D-LP64D-NEXT: sd a0, 8(sp) -; RV64D-LP64D-NEXT: lui a0, 24414 -; RV64D-LP64D-NEXT: add a0, sp, a0 -; RV64D-LP64D-NEXT: lw a0, 280(a0) -; RV64D-LP64D-NEXT: lui a1, 24414 -; RV64D-LP64D-NEXT: add a1, sp, a1 -; RV64D-LP64D-NEXT: sd a5, 312(a1) -; RV64D-LP64D-NEXT: lui a1, 24414 -; RV64D-LP64D-NEXT: add a1, sp, a1 -; RV64D-LP64D-NEXT: sd a6, 320(a1) -; RV64D-LP64D-NEXT: lui a1, 24414 -; RV64D-LP64D-NEXT: add a1, sp, a1 -; RV64D-LP64D-NEXT: sd a7, 328(a1) -; 
RV64D-LP64D-NEXT: lui a1, 24414 -; RV64D-LP64D-NEXT: add a1, sp, a1 -; RV64D-LP64D-NEXT: sd a2, 288(a1) -; RV64D-LP64D-NEXT: lui a1, 24414 -; RV64D-LP64D-NEXT: add a1, sp, a1 -; RV64D-LP64D-NEXT: sd a3, 296(a1) -; RV64D-LP64D-NEXT: lui a1, 24414 -; RV64D-LP64D-NEXT: add a1, sp, a1 -; RV64D-LP64D-NEXT: sd a4, 304(a1) -; RV64D-LP64D-NEXT: lui a1, 24414 -; RV64D-LP64D-NEXT: addi a1, a1, 336 -; RV64D-LP64D-NEXT: add sp, sp, a1 -; RV64D-LP64D-NEXT: .cfi_def_cfa_offset 0 -; RV64D-LP64D-NEXT: ret +; LP64-LP64F-LP64D-FPELIM-LABEL: va_large_stack: +; LP64-LP64F-LP64D-FPELIM: # %bb.0: +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 336 +; LP64-LP64F-LP64D-FPELIM-NEXT: sub sp, sp, a0 +; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 100000080 +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, sp, a0 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 280(a0) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 284 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, sp, a0 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, sp, a0 +; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 280(a0) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 312(a1) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 320(a1) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 328(a1) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 288(a1) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 296(a1) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 304(a1) +; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 24414 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, 336 +; LP64-LP64F-LP64D-FPELIM-NEXT: add sp, sp, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: .cfi_def_cfa_offset 0 +; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; ; LP64-LP64F-LP64D-WITHFP-LABEL: va_large_stack: ; LP64-LP64F-LP64D-WITHFP: # %bb.0: @@ -3723,5 +3141,3 @@ define i32 @va_large_stack(ptr %fmt, ...) { call void @llvm.va_end(ptr %va) ret i32 %1 } -;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: -; LP64-LP64F-LP64D-FPELIM: {{.*}} From 747c375fab01558740866860b07f7268da9419db Mon Sep 17 00:00:00 2001 From: Alex Bradbury Date: Wed, 8 Oct 2025 15:34:37 +0100 Subject: [PATCH 5/5] Address remaining review comments --- llvm/lib/Target/RISCV/CMakeLists.txt | 2 +- llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt index 8702b9e63f867..e9088a4d9275c 100644 --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -58,8 +58,8 @@ add_llvm_target(RISCVCodeGen RISCVMoveMerger.cpp RISCVOptWInstrs.cpp RISCVPostRAExpandPseudoInsts.cpp - RISCVPushPopOptimizer.cpp RISCVPromoteConstant.cpp + RISCVPushPopOptimizer.cpp RISCVRedundantCopyElimination.cpp RISCVRegisterInfo.cpp RISCVSelectionDAGInfo.cpp diff --git a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp index 89513a4c2e7ae..348927132d004 100644 --- a/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp +++ b/llvm/lib/Target/RISCV/RISCVPromoteConstant.cpp @@ -85,7 +85,7 @@ ModulePass *llvm::createRISCVPromoteConstantPass() { bool RISCVPromoteConstant::runOnFunction(Function &F, const RISCVTargetLowering *TLI) { - if (F.hasOptNone()) + if (F.hasOptNone() || F.hasOptSize()) return false; // Bail out and make no transformation if the target doesn't support @@ -100,14 +100,13 @@ bool RISCVPromoteConstant::runOnFunction(Function &F, for (Instruction &I : instructions(F)) { for (Use &U : I.operands()) { - if (auto *C = dyn_cast<ConstantFP>(U.get())) { - if (!C->getType()->isDoubleTy()) - continue; - if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64, - /*ForCodeSize=*/false)) - continue; - ConstUsesMap[C].push_back(&U); - } + auto *C = dyn_cast<ConstantFP>(U.get()); + if (!C || !C->getType()->isDoubleTy()) + continue; + if (TLI->isFPImmLegal(C->getValueAPF(), MVT::f64, + /*ForCodeSize=*/false)) + continue; + ConstUsesMap[C].push_back(&U); } }