From 1e142a42ddc0b79217f76a66e50511a3d042b657 Mon Sep 17 00:00:00 2001
From: Jianjian GUAN
Date: Thu, 25 Sep 2025 15:13:51 +0800
Subject: [PATCH 1/3] [RISCV][GISel] Support selecting vector load intrinsics

Includes the unit-stride (vle), strided (vlse) and mask (vlm) vector
load intrinsics.
---
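Note: for reference, these are the call shapes the new selection code handles,
as exercised by the tests below (a minimal sketch; iXLen is the tests'
stand-in for the native XLen integer type, substituted by sed in the RUN
lines, and %p/%avl/%stride are illustrative names):

  ; unit-stride load -> vle64.v
  %v = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(<vscale x 1 x i64> poison, ptr %p, iXLen %avl)
  ; strided load -> vlse64.v
  %s = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(<vscale x 1 x i64> poison, ptr %p, iXLen %stride, iXLen %avl)
  ; mask register load -> vlm.v
  %m = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr %p, iXLen %avl)
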
 .../RISCV/GISel/RISCVInstructionSelector.cpp  |  112 ++
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |    1 +
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp   |   13 +
 llvm/lib/Target/RISCV/RISCVRegisterInfo.h     |    3 +
 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll | 1338 +++++++++++++++
 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll |   96 ++
 .../test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll | 1462 +++++++++++++++++
 7 files changed, 3025 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 53633eac3d2c3..d97beea07a1ef 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -22,6 +22,7 @@
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/Support/Debug.h"

@@ -79,6 +80,9 @@ class RISCVInstructionSelector : public InstructionSelector {
   // Returns true if the instruction was modified.
   void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

+  // An early selection function that runs before the selectImpl() call.
+  bool earlySelect(MachineInstr &I, MachineIRBuilder &MIB);
+
   bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

   // Custom selection methods
@@ -92,6 +96,8 @@ class RISCVInstructionSelector : public InstructionSelector {
   void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                  MachineIRBuilder &MIB) const;
   bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
+  bool selectIntrinsicWithSideEffects(MachineInstr &I,
+                                      MachineIRBuilder &MIB) const;

   ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                      unsigned ShiftWidth) const;
@@ -714,6 +720,109 @@ static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
   return GenericOpc;
 }

+bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
+    MachineInstr &I, MachineIRBuilder &MIB) const {
+  // Find the intrinsic ID.
+  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
+  // Select the instruction.
+  switch (IntrinID) {
+  default:
+    return false;
+  case Intrinsic::riscv_vlm:
+  case Intrinsic::riscv_vle:
+  case Intrinsic::riscv_vle_mask:
+  case Intrinsic::riscv_vlse:
+  case Intrinsic::riscv_vlse_mask: {
+    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
+                    IntrinID == Intrinsic::riscv_vlse_mask;
+    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
+                     IntrinID == Intrinsic::riscv_vlse_mask;
+    LLT VT = MRI->getType(I.getOperand(0).getReg());
+    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+    // Result vector
+    const Register DstReg = I.getOperand(0).getReg();
+    const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
+        MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
+    if (IsMasked)
+      DstRC = TRI.getNoV0RegClass(DstRC);
+    RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
+
+    // Sources
+    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
+    unsigned CurOp = 2;
+    SmallVector<SrcOp, 4> SrcOps; // Source registers.
+
+    // Passthru
+    if (HasPassthruOperand) {
+      auto PassthruReg = I.getOperand(CurOp++).getReg();
+      SrcOps.push_back(PassthruReg);
+      RBI.constrainGenericRegister(PassthruReg, *DstRC, *MRI);
+    } else {
+      auto UndefReg = MRI->createVirtualRegister(DstRC);
+      MIB.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(UndefReg);
+      SrcOps.push_back(UndefReg);
+    }
+
+    // Base Pointer
+    auto PtrReg = I.getOperand(CurOp++).getReg();
+    SrcOps.push_back(PtrReg);
+
+    // Stride
+    if (IsStrided) {
+      auto StrideReg = I.getOperand(CurOp++).getReg();
+      SrcOps.push_back(StrideReg);
+    }
+
+    // Mask
+    if (IsMasked) {
+      auto MaskReg = I.getOperand(CurOp++).getReg();
+      RBI.constrainGenericRegister(MaskReg, RISCV::VMV0RegClass, *MRI);
+      SrcOps.push_back(MaskReg);
+    }
+
+    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
+    const RISCV::VLEPseudo *P =
+        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
+                            static_cast<unsigned>(LMUL));
+
+    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
+
+    // Select VL
+    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
+    for (auto &RenderFn : *VLOpFn)
+      RenderFn(PseudoMI);
+    if (auto VLReg = PseudoMI.getReg(PseudoMI.getInstr()->getNumOperands() - 1))
+      RBI.constrainGenericRegister(VLReg, RISCV::GPRNoX0RegClass, *MRI);
+
+    // SEW
+    PseudoMI.addImm(Log2SEW);
+
+    // Policy
+    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
+    if (IsMasked)
+      Policy = I.getOperand(CurOp++).getImm();
+    PseudoMI.addImm(Policy);
+
+    // Memref
+    PseudoMI.cloneMemRefs(I);
+
+    I.eraseFromParent();
+    return true;
+  }
+  }
+}
+
+bool RISCVInstructionSelector::earlySelect(MachineInstr &MI,
+                                           MachineIRBuilder &MIB) {
+  switch (MI.getOpcode()) {
+  default:
+    return false;
+  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
+    return selectIntrinsicWithSideEffects(MI, MIB);
+  }
+}
+
 bool RISCVInstructionSelector::select(MachineInstr &MI) {
   MachineIRBuilder MIB(MI);

@@ -755,6 +864,9 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return true;
   }

+  if (earlySelect(MI, MIB))
+    return true;
+
   if (selectImpl(MI, *CoverageInfo))
     return true;

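Note: in outline, selectIntrinsicWithSideEffects maps the generic intrinsic
onto the existing RVV load pseudo, appending the VL, SEW and policy operands
after the gathered source registers. A sketch for a masked e64/m1 unit-stride
load (illustrative only, not verbatim MIR; register and operand names are
made up):

  ; before: %dst = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.riscv.vle.mask),
  ;                %passthru, %ptr, %mask, %avl, %policy
  ; after:  %dst:vrnov0 = PseudoVLE64_V_M1_MASK %passthru, %ptr, %mask:vmv0,
  ;                %avl, 6 /* Log2SEW for e64 */, %policy

The mask is constrained to VMV0 so it lands in v0, and the destination is
moved to the NoV0 variant of its register class via the new getNoV0RegClass
helper added below.
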
"llvm/CodeGen/TargetRegisterInfo.h" #define GET_TARGET_REGBANK_IMPL diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp index 40b641680b2ce..af325aa93abd0 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp @@ -805,6 +805,19 @@ RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC, return RC; } +const TargetRegisterClass * +RISCVRegisterInfo::getNoV0RegClass(const TargetRegisterClass *RC) const { + if (RC == &RISCV::VRRegClass) + return &RISCV::VRNoV0RegClass; + if (RC == &RISCV::VRM2RegClass) + return &RISCV::VRM2NoV0RegClass; + if (RC == &RISCV::VRM4RegClass) + return &RISCV::VRM4NoV0RegClass; + if (RC == &RISCV::VRM8RegClass) + return &RISCV::VRM8NoV0RegClass; + return RC; +} + void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl &Ops) const { // VLENB is the length of a vector register in bytes. We use diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h index 67726db504122..449e00c561d13 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h @@ -131,6 +131,9 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo { getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const override; + const TargetRegisterClass * + getNoV0RegClass(const TargetRegisterClass *RC) const; + void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl &Ops) const override; diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll new file mode 100644 index 0000000000000..3e70ce2553668 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll @@ -0,0 +1,1338 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vle.nxv1i64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2i64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2i64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2i64( + , + ptr, + , + iXLen, + iXLen); + +define 
@intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2i64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4i64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.vle.mask.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2f32( 
+ , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vle_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vle16.v 
v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv32i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv32i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv32i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4i8( 
+ , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv32i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv32i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv32i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv32i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv64i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv64i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv64i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8: 
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    ptr %1,
+    <vscale x 64 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll
new file mode 100644
index 0000000000000..12279639893bc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -global-isel -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -global-isel -verify-machineinstrs | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr, iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr %0, iXLen %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr, iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr %0, iXLen %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr, iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr %0, iXLen %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr, iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr %0, iXLen %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr, iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr %0, iXLen %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr, iXLen);
+
+define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr %0, iXLen %1)
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr, iXLen);
+
+define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vlm_v_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vlm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr %0, iXLen %1)
+  ret <vscale x 64 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll
new file mode 100644
index 0000000000000..2339ad33ad06d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll
@@ -0,0 +1,1462 @@
+; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vlse.nxv1i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1i64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2i64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4i64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8i64( + 
poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vlse.nxv8i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv16i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv16i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu 
+; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv16f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv16f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define 
@intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a 
+    <vscale x 16 x i16> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
+  <vscale x 32 x i16>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vlse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
+    <vscale x 32 x i16> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
+; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
+  <vscale x 1 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
+    <vscale x 1 x i8> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
+  <vscale x 2 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
+    <vscale x 2 x i8> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
+  <vscale x 4 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
+    <vscale x 4 x i8> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
+  <vscale x 8 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
+    <vscale x 8 x i8> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
+  <vscale x 16 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
+    <vscale x 16 x i8> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
+  <vscale x 32 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
+    <vscale x 32 x i8> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
+  <vscale x 64 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT:    vlse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
+    <vscale x 64 x i8> poison,
+    ptr %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 64 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 64 x i8> %a
+}

From fadec492a032548ed396063943ba907cca35ca59 Mon Sep 17 00:00:00 2001
From: Jianjian GUAN
Date: Fri, 10 Oct 2025 18:12:29 +0800
Subject: [PATCH 2/3] Address comment

---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  | 33 +++----------------
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp   | 13 --------
 llvm/lib/Target/RISCV/RISCVRegisterInfo.h     |  3 ---
 3 files changed, 4 insertions(+), 45 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index d97beea07a1ef..f927b3d3f73e5 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -80,9 +80,6 @@ class RISCVInstructionSelector : public InstructionSelector {
   // Returns true if the instruction was modified.
   void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
 
-  // An early selection function that runs before the selectImpl() call.
-  bool earlySelect(MachineInstr &I, MachineIRBuilder &MIB);
-
   bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
 
   // Custom selection methods
@@ -742,11 +739,6 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
 
     // Result vector
     const Register DstReg = I.getOperand(0).getReg();
-    const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
-        MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
-    if (IsMasked)
-      DstRC = TRI.getNoV0RegClass(DstRC);
-    RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
 
     // Sources
     bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
@@ -757,11 +749,8 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     if (HasPassthruOperand) {
       auto PassthruReg = I.getOperand(CurOp++).getReg();
       SrcOps.push_back(PassthruReg);
-      RBI.constrainGenericRegister(PassthruReg, *DstRC, *MRI);
     } else {
-      auto UndefReg = MRI->createVirtualRegister(DstRC);
-      MIB.buildInstr(TargetOpcode::IMPLICIT_DEF).addDef(UndefReg);
-      SrcOps.push_back(UndefReg);
+      SrcOps.push_back(Register(RISCV::NoRegister));
     }
 
     // Base Pointer
@@ -777,7 +766,6 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     // Mask
     if (IsMasked) {
       auto MaskReg = I.getOperand(CurOp++).getReg();
-      RBI.constrainGenericRegister(MaskReg, RISCV::VMV0RegClass, *MRI);
       SrcOps.push_back(MaskReg);
     }
 
@@ -792,8 +780,6 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
     for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);
-    if (auto VLReg = PseudoMI.getReg(PseudoMI.getInstr()->getNumOperands() - 1))
-      RBI.constrainGenericRegister(VLReg, RISCV::GPRNoX0RegClass, *MRI);
 
     // SEW
     PseudoMI.addImm(Log2SEW);
@@ -808,21 +794,11 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     PseudoMI.cloneMemRefs(I);
 
     I.eraseFromParent();
-    return true;
+    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
   }
   }
 }
 
-bool RISCVInstructionSelector::earlySelect(MachineInstr &MI,
-                                           MachineIRBuilder &MIB) {
-  switch (MI.getOpcode()) {
-  default:
-    return false;
-  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
-    return selectIntrinsicWithSideEffects(MI, MIB);
-  }
-}
-
 bool RISCVInstructionSelector::select(MachineInstr &MI) {
   MachineIRBuilder MIB(MI);
 
@@ -864,9 +840,6 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return true;
   }
 
-  if (earlySelect(MI, MIB))
-    return true;
-
   if (selectImpl(MI, *CoverageInfo))
     return true;
 
@@ -1096,6 +1069,8 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
     return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
   }
 
+  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
+    return selectIntrinsicWithSideEffects(MI, MIB);
   default:
     return false;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index af325aa93abd0..40b641680b2ce 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -805,19 +805,6 @@ RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
   return RC;
 }
 
-const TargetRegisterClass *
-RISCVRegisterInfo::getNoV0RegClass(const TargetRegisterClass *RC) const {
-  if (RC == &RISCV::VRRegClass)
-    return &RISCV::VRNoV0RegClass;
-  if (RC == &RISCV::VRM2RegClass)
-    return &RISCV::VRM2NoV0RegClass;
-  if (RC == &RISCV::VRM4RegClass)
-    return &RISCV::VRM4NoV0RegClass;
-  if (RC == &RISCV::VRM8RegClass)
-    return &RISCV::VRM8NoV0RegClass;
-  return RC;
-}
-
 void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                          SmallVectorImpl<uint64_t> &Ops) const {
   // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 449e00c561d13..67726db504122 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -131,9 +131,6 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
   getLargestLegalSuperClass(const TargetRegisterClass *RC,
                             const MachineFunction &) const override;
 
-  const TargetRegisterClass *
-  getNoV0RegClass(const TargetRegisterClass *RC) const;
-
   void getOffsetOpcodes(const StackOffset &Offset,
                         SmallVectorImpl<uint64_t> &Ops) const override;
 

From a9a2e3c83f1d91e5dd38622c6b0611c27ba15ce6 Mon Sep 17 00:00:00 2001
From: Jianjian GUAN
Date: Sat, 11 Oct 2025 16:54:29 +0800
Subject: [PATCH 3/3] Remove mis-added include

---
 llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp | 1 -
 llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp    | 1 -
 2 files changed, 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index f927b3d3f73e5..81981732ee080 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -22,7 +22,6 @@
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineJumpTableInfo.h"
-#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/Support/Debug.h"
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 9e2553458790d..9f9ae2f5c6dc6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -17,7 +17,6 @@
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/RegisterBank.h"
 #include "llvm/CodeGen/RegisterBankInfo.h"
-#include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 
 #define GET_TARGET_REGBANK_IMPL