diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 81981732ee080..282cf5d681685 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -92,6 +92,10 @@ class RISCVInstructionSelector : public InstructionSelector {
   void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                  MachineIRBuilder &MIB) const;
   bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
+  void addVectorLoadStoreOperands(MachineInstr &I,
+                                  SmallVectorImpl<SrcOp> &SrcOps,
+                                  unsigned &CurOp, bool IsMasked,
+                                  bool IsStrided) const;
   bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                       MachineIRBuilder &MIB) const;
 
@@ -716,6 +720,26 @@ static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
   return GenericOpc;
 }
 
+void RISCVInstructionSelector::addVectorLoadStoreOperands(
+    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
+    bool IsMasked, bool IsStrided) const {
+  // Base Pointer
+  auto PtrReg = I.getOperand(CurOp++).getReg();
+  SrcOps.push_back(PtrReg);
+
+  // Stride
+  if (IsStrided) {
+    auto StrideReg = I.getOperand(CurOp++).getReg();
+    SrcOps.push_back(StrideReg);
+  }
+
+  // Mask
+  if (IsMasked) {
+    auto MaskReg = I.getOperand(CurOp++).getReg();
+    SrcOps.push_back(MaskReg);
+  }
+}
+
 bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     MachineInstr &I, MachineIRBuilder &MIB) const {
   // Find the intrinsic ID.
@@ -752,21 +776,7 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
       SrcOps.push_back(Register(RISCV::NoRegister));
     }
 
-    // Base Pointer
-    auto PtrReg = I.getOperand(CurOp++).getReg();
-    SrcOps.push_back(PtrReg);
-
-    // Stride
-    if (IsStrided) {
-      auto StrideReg = I.getOperand(CurOp++).getReg();
-      SrcOps.push_back(StrideReg);
-    }
-
-    // Mask
-    if (IsMasked) {
-      auto MaskReg = I.getOperand(CurOp++).getReg();
-      SrcOps.push_back(MaskReg);
-    }
+    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
 
     RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
     const RISCV::VLEPseudo *P =
@@ -795,6 +805,48 @@ bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
     I.eraseFromParent();
     return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
   }
+  case Intrinsic::riscv_vsm:
+  case Intrinsic::riscv_vse:
+  case Intrinsic::riscv_vse_mask:
+  case Intrinsic::riscv_vsse:
+  case Intrinsic::riscv_vsse_mask: {
+    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
+                    IntrinID == Intrinsic::riscv_vsse_mask;
+    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
+                     IntrinID == Intrinsic::riscv_vsse_mask;
+    LLT VT = MRI->getType(I.getOperand(1).getReg());
+    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+
+    // Sources
+    unsigned CurOp = 1;
+    SmallVector<SrcOp, 4> SrcOps; // Source registers.
+
+    // Store value
+    auto PassthruReg = I.getOperand(CurOp++).getReg();
+    SrcOps.push_back(PassthruReg);
+
+    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
+
+    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
+    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
+        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
+
+    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);
+
+    // Select VL
+    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
+    for (auto &RenderFn : *VLOpFn)
+      RenderFn(PseudoMI);
+
+    // SEW
+    PseudoMI.addImm(Log2SEW);
+
+    // Memref
+    PseudoMI.cloneMemRefs(I);
+
+    I.eraseFromParent();
+    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
+  }
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll
new file mode 100644
index 0000000000000..785d9fc6a7970
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll
@@ -0,0 +1,1575 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare void @llvm.riscv.vse.nxv1i64(
+  <vscale x 1 x i64>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv1i64(
+    <vscale x 1 x i64> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  ptr,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    ptr %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
+
+define void @intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    ptr %1,
+    <vscale x 1 x i1> splat (i1 true),
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i64(
+  <vscale x 2 x i64>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv2i64(
+    <vscale x 2 x i64> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  ptr,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    ptr %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i64(
+  <vscale x 4 x i64>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv4i64(
+    <vscale x 4 x i64> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  ptr,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    ptr %1,
+    <vscale x 4 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i64(
+  <vscale x 8 x i64>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv8i64(
+    <vscale x 8 x i64> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  ptr,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    ptr %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f64(
+  <vscale x 1 x double>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv1f64(
+    <vscale x 1 x double> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f64(
+  <vscale x 1 x double>,
+  ptr,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    ptr %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f64(
+  <vscale x 2 x double>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv2f64(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f64(
+  <vscale x 2 x double>,
+  ptr,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    ptr %1,
+    <vscale x 2 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f64(
+  <vscale x 4 x double>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: 
intrinsic_vse_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv4f64( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv4f64( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv4f64( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv8f64( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv8f64( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv8f64( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv8f64( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv1i32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv1i32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv1i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv1i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv2i32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv2i32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv2i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv2i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv4i32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; 
CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv4i32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv4i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv4i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv8i32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv8i32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv8i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv8i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv16i32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv16i32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv16i32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv16i32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv1f32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv1f32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv1f32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv1f32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv2f32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv2f32( + %0, + 
ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv2f32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv2f32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv4f32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv4f32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv4f32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv4f32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv8f32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv8f32_nxv8f32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv8f32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv8f32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv8f32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv16f32( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv16f32_nxv16f32( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv16f32( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv16f32( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vse32.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv16f32( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv1i16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv1i16_nxv1i16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv1i16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv1i16( + , + ptr, + , + 
iXLen); + +define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv1i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv2i16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv2i16_nxv2i16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv2i16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv2i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv2i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv4i16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv4i16_nxv4i16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv4i16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv4i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv4i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv8i16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv8i16_nxv8i16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv8i16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv8i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv8i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv16i16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv16i16_nxv16i16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv16i16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv16i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16( %0, ptr %1, %2, iXLen %3) nounwind { +; 
CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv16i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv32i16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv32i16_nxv32i16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv32i16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv32i16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv32i16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv1f16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv1f16_nxv1f16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv1f16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv1f16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv1f16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv2f16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv2f16_nxv2f16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv2f16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv2f16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv2f16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv4f16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv4f16_nxv4f16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv4f16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv4f16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv4f16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv8f16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv8f16_nxv8f16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv8f16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv8f16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv8f16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv16f16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv16f16_nxv16f16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv16f16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv16f16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv16f16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv32f16( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv32f16_nxv32f16( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv32f16( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv32f16( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vse16.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv32f16( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv1i8( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv1i8_nxv1i8( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv1i8( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv1i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + 
call void @llvm.riscv.vse.mask.nxv1i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv2i8( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv2i8_nxv2i8( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv2i8( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv2i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv2i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv4i8( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv4i8_nxv4i8( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv4i8( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv4i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv4i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv8i8( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv8i8_nxv8i8( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv8i8( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv8i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv8i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv16i8( + , + ptr, + iXLen); + +define void @intrinsic_vse_v_nxv16i8_nxv16i8( %0, ptr %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.nxv16i8( + %0, + ptr %1, + iXLen %2) + + ret void +} + +declare void @llvm.riscv.vse.mask.nxv16i8( + , + ptr, + , + iXLen); + +define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vse8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv16i8( + %0, + ptr %1, + %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vse.nxv32i8( + , + ptr, + iXLen); + +define 
void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv32i8(
+    <vscale x 32 x i8> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  ptr,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    ptr %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.nxv64i8(
+  <vscale x 64 x i8>,
+  ptr,
+  iXLen);
+
+define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.nxv64i8(
+    <vscale x 64 x i8> %0,
+    ptr %1,
+    iXLen %2)
+
+  ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  ptr,
+  <vscale x 64 x i1>,
+  iXLen);
+
+define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vse.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    ptr %1,
+    <vscale x 64 x i1> %2,
+    iXLen %3)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll
new file mode 100644
index 0000000000000..5237536c07740
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll
@@ -0,0 +1,139 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -global-isel -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -global-isel -verify-machineinstrs | FileCheck %s
+
+declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2)
+  ret void
+}
+
+declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vsm.v v0, (a0)
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+; Make sure we can use the vsetvli from the producing instruction.
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, ptr %2, iXLen %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vmseq.vv v8, v8, v9
+; CHECK-NEXT:    vsm.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    iXLen %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
+  ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen);
+
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, ptr %2, iXLen %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vmseq.vv v8, v8, v9
+; CHECK-NEXT:    vsm.v v8, (a0)
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %3)
+  call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll
new file mode 100644
index 0000000000000..b7609ff5fd1cd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll
@@ -0,0 +1,1724 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN:   -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare void @llvm.riscv.vsse.nxv1i64(
+  <vscale x 1 x i64>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv1i64(
+    <vscale x 1 x i64> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  ptr,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen 
%4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv1i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +define void @intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv1i64( + %0, + ptr %1, + iXLen %2, + splat (i1 true), + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i64( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv2i64( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i64( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv2i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i64( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv4i64( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i64( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv4i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i64( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv8i64( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i64( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv8i64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen 
%4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1f64( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv1f64( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1f64( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv1f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2f64( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv2f64( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2f64( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv2f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4f64( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv4f64( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4f64( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv4f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8f64( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv8f64( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8f64( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv8f64( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1i32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv1i32( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i32( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv1i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv2i32( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i32( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv2i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv4i32( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i32( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv4i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv8i32( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i32( + , 
+ ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv8i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16i32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv16i32( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16i32( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv16i32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1f32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv1f32( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1f32( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv1f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2f32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1 +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.nxv2f32( + %0, + ptr %1, + iXLen %2, + iXLen %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2f32( + , + ptr, + iXLen, + , + iXLen); + +define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv2f32( + %0, + ptr %1, + iXLen %2, + %3, + iXLen %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4f32( + , + ptr, + iXLen, + iXLen); + +define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, ptr %1, iXLen %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a2, e32, 
m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv4f32(
+    <vscale x 4 x float> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f32(
+  <vscale x 4 x float>,
+  ptr,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f32(
+  <vscale x 8 x float>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv8f32(
+    <vscale x 8 x float> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f32(
+  <vscale x 8 x float>,
+  ptr,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f32(
+  <vscale x 16 x float>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv16f32(
+    <vscale x 16 x float> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f32(
+  <vscale x 16 x float>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i16(
+  <vscale x 1 x i16>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv1i16(
+    <vscale x 1 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i16(
+  <vscale x 2 x i16>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv2i16(
+    <vscale x 2 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i16(
+  <vscale x 4 x i16>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv4i16(
+    <vscale x 4 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i16(
+  <vscale x 8 x i16>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv8i16(
+    <vscale x 8 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i16(
+  <vscale x 16 x i16>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv16i16(
+    <vscale x 16 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i16(
+  <vscale x 32 x i16>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv32i16(
+    <vscale x 32 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  ptr,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f16(
+  <vscale x 1 x half>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv1f16(
+    <vscale x 1 x half> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f16(
+  <vscale x 1 x half>,
+  ptr,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f16(
+  <vscale x 2 x half>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv2f16(
+    <vscale x 2 x half> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f16(
+  <vscale x 2 x half>,
+  ptr,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f16(
+  <vscale x 4 x half>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv4f16(
+    <vscale x 4 x half> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f16(
+  <vscale x 4 x half>,
+  ptr,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f16(
+  <vscale x 8 x half>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv8f16(
+    <vscale x 8 x half> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f16(
+  <vscale x 8 x half>,
+  ptr,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f16(
+  <vscale x 16 x half>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv16f16(
+    <vscale x 16 x half> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f16(
+  <vscale x 16 x half>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32f16(
+  <vscale x 32 x half>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv32f16(
+    <vscale x 32 x half> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32f16(
+  <vscale x 32 x half>,
+  ptr,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i8(
+  <vscale x 1 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv1i8(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i8(
+  <vscale x 2 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv2i8(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i8(
+  <vscale x 4 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv4i8(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i8(
+  <vscale x 8 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv8i8(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i8(
+  <vscale x 16 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv16i8(
+    <vscale x 16 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i8(
+  <vscale x 32 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv32i8(
+    <vscale x 32 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 32 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv64i8(
+  <vscale x 64 x i8>,
+  ptr,
+  iXLen,
+  iXLen);
+
+define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.nxv64i8(
+    <vscale x 64 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    iXLen %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  ptr,
+  iXLen,
+  <vscale x 64 x i1>,
+  iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  call void @llvm.riscv.vsse.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    ptr %1,
+    iXLen %2,
+    <vscale x 64 x i1> %3,
+    iXLen %4)
+
+  ret void
+}