diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp index 53633eac3d2c3..81981732ee080 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp @@ -92,6 +92,8 @@ class RISCVInstructionSelector : public InstructionSelector { void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID, MachineIRBuilder &MIB) const; bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const; + bool selectIntrinsicWithSideEffects(MachineInstr &I, + MachineIRBuilder &MIB) const; ComplexRendererFns selectShiftMask(MachineOperand &Root, unsigned ShiftWidth) const; @@ -714,6 +716,88 @@ static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) { return GenericOpc; } +bool RISCVInstructionSelector::selectIntrinsicWithSideEffects( + MachineInstr &I, MachineIRBuilder &MIB) const { + // Find the intrinsic ID. + unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID(); + // Select the instruction. + switch (IntrinID) { + default: + return false; + case Intrinsic::riscv_vlm: + case Intrinsic::riscv_vle: + case Intrinsic::riscv_vle_mask: + case Intrinsic::riscv_vlse: + case Intrinsic::riscv_vlse_mask: { + bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask || + IntrinID == Intrinsic::riscv_vlse_mask; + bool IsStrided = IntrinID == Intrinsic::riscv_vlse || + IntrinID == Intrinsic::riscv_vlse_mask; + LLT VT = MRI->getType(I.getOperand(0).getReg()); + unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits()); + + // Result vector + const Register DstReg = I.getOperand(0).getReg(); + + // Sources. Operand 0 is the result and operand 1 the intrinsic ID, so the source operands start at index 2. + bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm; + unsigned CurOp = 2; + SmallVector<SrcOp, 4> SrcOps; // Source registers.
+ + // Passthru + if (HasPassthruOperand) { + auto PassthruReg = I.getOperand(CurOp++).getReg(); + SrcOps.push_back(PassthruReg); + } else { + SrcOps.push_back(Register(RISCV::NoRegister)); + } + + // Base Pointer + auto PtrReg = I.getOperand(CurOp++).getReg(); + SrcOps.push_back(PtrReg); + + // Stride + if (IsStrided) { + auto StrideReg = I.getOperand(CurOp++).getReg(); + SrcOps.push_back(StrideReg); + } + + // Mask + if (IsMasked) { + auto MaskReg = I.getOperand(CurOp++).getReg(); + SrcOps.push_back(MaskReg); + } + + RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT)); + const RISCV::VLEPseudo *P = + RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW, + static_cast<unsigned>(LMUL)); + + auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps); + + // Select VL + auto VLOpFn = renderVLOp(I.getOperand(CurOp++)); + for (auto &RenderFn : *VLOpFn) + RenderFn(PseudoMI); + + // SEW + PseudoMI.addImm(Log2SEW); + + // Policy + uint64_t Policy = RISCVVType::MASK_AGNOSTIC; + if (IsMasked) + Policy = I.getOperand(CurOp++).getImm(); + PseudoMI.addImm(Policy); + + // Memref + PseudoMI.cloneMemRefs(I); + + I.eraseFromParent(); + return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI); + } + } +} + bool RISCVInstructionSelector::select(MachineInstr &MI) { MachineIRBuilder MIB(MI); @@ -984,6 +1068,8 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) { return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI); } + case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: + return selectIntrinsicWithSideEffects(MI, MIB); default: return false; } diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll new file mode 100644 index 0000000000000..3e70ce2553668 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll @@ -0,0 +1,1338 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64( + <vscale x 1 x i64>, + ptr, + iXLen); + +define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64( + <vscale x 1 x i64> poison, + ptr %0, + iXLen %1) + + ret <vscale x 1 x i64> %a +} + +declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64( + <vscale x 1 x i64>, + ptr, + <vscale x 1 x i1>, + iXLen, + iXLen); + +define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64( + <vscale x 1 x i64> %0, + ptr %1, + <vscale x 1 x i1> %2, + iXLen %3, iXLen 1) + + ret <vscale x 1 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64( + <vscale x 2 x i64>, + ptr, + iXLen); + +define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64( + <vscale x 2 x i64> poison, + ptr %0, + iXLen %1) + + ret <vscale x 2 x i64> %a +} + +declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64( + <vscale x 2 x i64>, + ptr, + <vscale x 2 x i1>, + 
iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2i64_nxv2i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2i64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4i64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i64_nxv4i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i64_nxv8i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1f64_nxv1f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2f64_nxv2f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vle_mask_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8f64( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8f64( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8f64( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8f64( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vle32.v 
v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16i32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16i32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16i32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + 
+ ret %a +} + +declare @llvm.riscv.vle.nxv2f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16f32( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16f32( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16f32( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vle32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16f32( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) 
nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv4i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, 
ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv32i16( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vle16.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv32i16( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv32i16( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vle16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv1i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv1i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv1i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv2i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv2i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv2i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv4i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv4i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare 
@llvm.riscv.vle.mask.nxv4i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv8i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv8i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv8i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv16i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv16i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv16i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv32i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv32i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv32i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv32i8_nxv32i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.mask.nxv32i8( + %0, + ptr %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vle.nxv64i8( + , + ptr, + iXLen); + +define @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vle8.v v8, (a0) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vle.nxv64i8( + poison, + ptr %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vle.mask.nxv64i8( + , + ptr, + , + iXLen, + iXLen); + +define @intrinsic_vle_mask_v_nxv64i8_nxv64i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vle_mask_v_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vle8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8( + <vscale x 64 x i8> %0, + ptr %1, + <vscale x 64 x i1> %2, + iXLen %3, iXLen 1) + + ret <vscale x 64 x i8> %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll new file mode 100644 index 0000000000000..12279639893bc --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlm.ll @@ -0,0 +1,96 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \ +; RUN: -global-isel -verify-machineinstrs | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \ +; RUN: -global-isel -verify-machineinstrs | FileCheck %s + +declare <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr, iXLen); + +define <vscale x 1 x i1> @intrinsic_vlm_v_nxv1i1(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vlm_v_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 1 x i1> @llvm.riscv.vlm.nxv1i1(ptr %0, iXLen %1) + ret <vscale x 1 x i1> %a +} + +declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr, iXLen); + +define <vscale x 2 x i1> @intrinsic_vlm_v_nxv2i1(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vlm_v_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1(ptr %0, iXLen %1) + ret <vscale x 2 x i1> %a +} + +declare <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr, iXLen); + +define <vscale x 4 x i1> @intrinsic_vlm_v_nxv4i1(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vlm_v_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 4 x i1> @llvm.riscv.vlm.nxv4i1(ptr %0, iXLen %1) + ret <vscale x 4 x i1> %a +} + +declare <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr, iXLen); + +define <vscale x 8 x i1> @intrinsic_vlm_v_nxv8i1(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vlm_v_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 8 x i1> @llvm.riscv.vlm.nxv8i1(ptr %0, iXLen %1) + ret <vscale x 8 x i1> %a +} + +declare <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr, iXLen); + +define <vscale x 16 x i1> @intrinsic_vlm_v_nxv16i1(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vlm_v_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 16 x i1> @llvm.riscv.vlm.nxv16i1(ptr %0, iXLen %1) + ret <vscale x 16 x i1> %a +} + +declare <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr, iXLen); + +define <vscale x 32 x i1> @intrinsic_vlm_v_nxv32i1(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vlm_v_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 32 x i1> @llvm.riscv.vlm.nxv32i1(ptr %0, iXLen %1) + ret <vscale x 32 x i1> %a +} + +declare <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr, iXLen); + +define <vscale x 64 x i1> @intrinsic_vlm_v_nxv64i1(ptr %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vlm_v_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vlm.v v0, (a0) +; CHECK-NEXT: ret +entry: + %a = call <vscale x 64 x i1> @llvm.riscv.vlm.nxv64i1(ptr %0, iXLen %1) + ret <vscale x 64 x i1> %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll new file mode 100644 index 0000000000000..2339ad33ad06d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vlse.ll @@ -0,0 +1,1462 @@ +; 
NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \ +; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vlse.nxv1i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1i64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2i64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4i64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vlse.nxv8i64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f64( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vlse_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vlse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8f64( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f64( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) 
+ + ret %a +} + +declare @llvm.riscv.vlse.nxv8i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv16i32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv16i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a2, e32, m1, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16f32( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vlse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv16f32( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16f32( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu +; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv16f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define 
@intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a 
= call @llvm.riscv.vlse.nxv16i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv16i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32i16( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma +; CHECK-NEXT: vlse16.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv32i16( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32i16( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu +; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv32i16( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i8( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv1i8( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i8( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu +; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv1i8( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i8( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv2i8( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i8( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu +; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv2i8( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i8( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vlse_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv4i8( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i8( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu +; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv4i8( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i8( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv8i8( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i8( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu +; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv8i8( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i8( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv16i8( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i8( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu +; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv16i8( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32i8( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv32i8( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32i8( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu +; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv32i8( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vlse.nxv64i8( + , + ptr, + iXLen, + iXLen); + +define @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma +; CHECK-NEXT: vlse8.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.nxv64i8( + poison, + ptr %0, + iXLen %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv64i8( + , + ptr, + iXLen, + , + iXLen, + iXLen); + +define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu +; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vlse.mask.nxv64i8( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4, iXLen 1) + + ret %a +}
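
A minimal way to sanity-check what the new selector path produces is to stop llc right after instruction selection. The invocation below mirrors the RUN lines of the tests; the MIR shown is a sketch, with illustrative virtual-register names rather than autogenerated output:

  $ sed 's/iXLen/i64/g' llvm/test/CodeGen/RISCV/GlobalISel/rvv/vle.ll | \
      llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -target-abi=lp64d \
          -global-isel -stop-after=instruction-select -o -

  ; For intrinsic_vle_v_nxv1i64_nxv1i64, the G_INTRINSIC_W_SIDE_EFFECTS carrying
  ; riscv_vle is rewritten to approximately:
  ;   %dst:vr = PseudoVLE64_V_M1 %passthru, %base, %avl, 6, 2
  ; where the trailing immediates are Log2SEW (6 for e64) and the policy
  ; (RISCVVType::MASK_AGNOSTIC in the unmasked case), appended after the SrcOps
  ; collected by selectIntrinsicWithSideEffects.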