diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 1b322d30e7100..ec23aaf70fedd 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15394,6 +15394,8 @@ Semantics:
 The '``llvm.ctpop``' intrinsic counts the 1's in a variable, or within each
 element of a vector.
 
+.. _int_ctlz:
+
 '``llvm.ctlz.*``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -15438,6 +15440,8 @@ zeros in a variable, or within each element of the vector. If
 ``src == 0`` then the result is the size in bits of the type of ``src`` if
 ``is_zero_poison == 0`` and ``poison`` otherwise.
 For example, ``llvm.ctlz(i32 2) = 30``.
 
+.. _int_cttz:
+
 '``llvm.cttz.*``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -22278,6 +22282,100 @@ Examples:
 
       %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
 
+.. _int_vp_ctlz:
+
+'``llvm.vp.ctlz.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x i32> @llvm.vp.ctlz.v16i32 (<16 x i32> <op>, <16 x i1> <mask>, i32 <vector_length>, i1 <is_zero_poison>)
+      declare <vscale x 4 x i32> @llvm.vp.ctlz.nxv4i32 (<vscale x 4 x i32> <op>, <vscale x 4 x i1> <mask>, i32 <vector_length>, i1 <is_zero_poison>)
+      declare <256 x i64> @llvm.vp.ctlz.v256i64 (<256 x i64> <op>, <256 x i1> <mask>, i32 <vector_length>, i1 <is_zero_poison>)
+
+Overview:
+"""""""""
+
+Predicated ctlz of a vector of integers.
+
+
+Arguments:
+""""""""""
+
+The first operand and the result have the same vector of integer type. The
+second operand is the vector mask and has the same number of elements as the
+result vector type. The third operand is the explicit vector length of the
+operation. The fourth operand is a constant flag; when it is true, the result
+on an enabled lane whose input element is zero is a poison value.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.ctlz``' intrinsic performs ctlz (:ref:`ctlz <int_ctlz>`) of the first operand on each
+enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl, i1 false)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
+      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
+.. _int_vp_cttz:
+
+'``llvm.vp.cttz.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x i32> @llvm.vp.cttz.v16i32 (<16 x i32> <op>, <16 x i1> <mask>, i32 <vector_length>, i1 <is_zero_poison>)
+      declare <vscale x 4 x i32> @llvm.vp.cttz.nxv4i32 (<vscale x 4 x i32> <op>, <vscale x 4 x i1> <mask>, i32 <vector_length>, i1 <is_zero_poison>)
+      declare <256 x i64> @llvm.vp.cttz.v256i64 (<256 x i64> <op>, <256 x i1> <mask>, i32 <vector_length>, i1 <is_zero_poison>)
+
+Overview:
+"""""""""
+
+Predicated cttz of a vector of integers.
+
+
+Arguments:
+""""""""""
+
+The first operand and the result have the same vector of integer type. The
+second operand is the vector mask and has the same number of elements as the
+result vector type. The third operand is the explicit vector length of the
+operation. The fourth operand is a constant flag; when it is true, the result
+on an enabled lane whose input element is zero is a poison value.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.cttz``' intrinsic performs cttz (:ref:`cttz <int_cttz>`) of the first operand on each
+enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl, i1 false)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 false)
+      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
 .. _int_vp_fshl:
 
 '``llvm.vp.fshl.*``' Intrinsics
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 438b60b48caef..f6179f9dd7241 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4911,6 +4911,11 @@ class TargetLowering : public TargetLoweringBase {
   /// \returns The expansion result or SDValue() if it fails.
   SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;
 
+  /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
+  /// \param N Node to expand
+  /// \returns The expansion result or SDValue() if it fails.
+  SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;
+
   /// Expand CTTZ via Table Lookup.
   /// \param N Node to expand
   /// \returns The expansion result or SDValue() if it fails.
@@ -4923,6 +4928,11 @@ class TargetLowering : public TargetLoweringBase {
   /// \returns The expansion result or SDValue() if it fails.
   SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;
 
+  /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
+  /// \param N Node to expand
+  /// \returns The expansion result or SDValue() if it fails.
+  SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;
+
   /// Expand ABS nodes. Expands vector/scalar ABS nodes,
   /// vector nodes can only succeed if all operations are legal/custom.
   /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size))
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 41dd958e068d7..6b9fef718dfe7 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1817,6 +1817,19 @@ let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
                                          llvm_i32_ty]>;
 }
 
+let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<3>>] in {
+  def int_vp_ctlz : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                                          [ LLVMMatchType<0>,
+                                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                            llvm_i32_ty,
+                                            llvm_i1_ty]>;
+  def int_vp_cttz : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                                          [ LLVMMatchType<0>,
+                                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                            llvm_i32_ty,
+                                            llvm_i1_ty]>;
+}
+
 def int_get_active_lane_mask:
   DefaultAttrsIntrinsic<[llvm_anyvector_ty],
             [llvm_anyint_ty, LLVMMatchType<1>],
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index aa69fe2772f1b..e4ac854368a54 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -228,6 +228,22 @@ END_REGISTER_VP(vp_bitreverse, VP_BITREVERSE)
 BEGIN_REGISTER_VP(vp_ctpop, 1, 2, VP_CTPOP, -1)
 END_REGISTER_VP(vp_ctpop, VP_CTPOP)
 
+// llvm.vp.ctlz(x,mask,vlen, is_zero_poison)
+BEGIN_REGISTER_VP_INTRINSIC(vp_ctlz, 1, 2)
+BEGIN_REGISTER_VP_SDNODE(VP_CTLZ, -1, vp_ctlz, 1, 2)
+END_REGISTER_VP_SDNODE(VP_CTLZ)
+BEGIN_REGISTER_VP_SDNODE(VP_CTLZ_ZERO_UNDEF, -1, vp_ctlz_zero_undef, 1, 2)
+END_REGISTER_VP_SDNODE(VP_CTLZ_ZERO_UNDEF)
+END_REGISTER_VP_INTRINSIC(vp_ctlz)
+
+// llvm.vp.cttz(x,mask,vlen, is_zero_poison)
+BEGIN_REGISTER_VP_INTRINSIC(vp_cttz, 1, 2)
+BEGIN_REGISTER_VP_SDNODE(VP_CTTZ, -1, vp_cttz, 1, 2)
+END_REGISTER_VP_SDNODE(VP_CTTZ)
+BEGIN_REGISTER_VP_SDNODE(VP_CTTZ_ZERO_UNDEF, -1, vp_cttz_zero_undef, 1, 2)
+END_REGISTER_VP_SDNODE(VP_CTTZ_ZERO_UNDEF)
+END_REGISTER_VP_INTRINSIC(vp_cttz)
+
 // llvm.vp.fshl(x,y,z,mask,vlen)
 BEGIN_REGISTER_VP(vp_fshl, 3, 4, VP_FSHL, -1)
 END_REGISTER_VP(vp_fshl, VP_FSHL)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index eb00d3f5681a7..e245b3cb4c6d6 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -813,6 +813,13 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
       return;
     }
     break;
+  case ISD::VP_CTLZ:
+  case ISD::VP_CTLZ_ZERO_UNDEF:
+    if (SDValue Expanded = TLI.expandVPCTLZ(Node, DAG)) {
+      Results.push_back(Expanded);
+      return;
+    }
+    break;
   case ISD::CTTZ:
   case ISD::CTTZ_ZERO_UNDEF:
     if (SDValue Expanded = TLI.expandCTTZ(Node, DAG)) {
@@ -820,6 +827,13 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
       return;
     }
     break;
+  case ISD::VP_CTTZ:
+  case ISD::VP_CTTZ_ZERO_UNDEF:
+    if (SDValue Expanded = TLI.expandVPCTTZ(Node, DAG)) {
+      Results.push_back(Expanded);
+      return;
+    }
+    break;
   case ISD::FSHL:
   case ISD::VP_FSHL:
   case ISD::FSHR:
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 3bec57971f93b..1c6bbd79329ff 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1016,9 +1016,13 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::BSWAP: case ISD::VP_BSWAP:
   case ISD::CTLZ:
+  case ISD::VP_CTLZ:
   case ISD::CTTZ:
+  case ISD::VP_CTTZ:
   case ISD::CTLZ_ZERO_UNDEF:
+  case ISD::VP_CTLZ_ZERO_UNDEF:
   case ISD::CTTZ_ZERO_UNDEF:
+  case ISD::VP_CTTZ_ZERO_UNDEF:
   case ISD::CTPOP: case ISD::VP_CTPOP:
   case ISD::FABS: case ISD::VP_FABS:
@@ -4097,11 +4101,15 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
   case ISD::BSWAP: case ISD::VP_BSWAP:
   case ISD::CTLZ:
+  case ISD::VP_CTLZ:
   case ISD::CTLZ_ZERO_UNDEF:
+  case ISD::VP_CTLZ_ZERO_UNDEF:
   case ISD::CTPOP: case ISD::VP_CTPOP:
   case ISD::CTTZ:
+  case ISD::VP_CTTZ:
   case ISD::CTTZ_ZERO_UNDEF:
+  case ISD::VP_CTTZ_ZERO_UNDEF:
   case ISD::FNEG: case ISD::VP_FNEG:
   case ISD::VP_FABS: case ISD::VP_SQRT:
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 5554973d86a56..04f10e95f9c0e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7437,6 +7437,16 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
   std::optional<unsigned> ResOPC;
   switch (VPIntrin.getIntrinsicID()) {
+  case Intrinsic::vp_ctlz: {
+    bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(3))->isOne();
+    ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
+    break;
+  }
+  case Intrinsic::vp_cttz: {
+    bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(3))->isOne();
+    ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
+    break;
+  }
 #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
   case Intrinsic::VPID:                                                        \
     ResOPC = ISD::VPSD;                                                        \
@@ -7771,6 +7781,16 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
     setValue(&VPIntrin, N);
     break;
   }
+  case ISD::VP_CTLZ:
+  case ISD::VP_CTLZ_ZERO_UNDEF:
+  case ISD::VP_CTTZ:
+  case ISD::VP_CTTZ_ZERO_UNDEF: {
+    // Pop is_zero_poison operand.
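+    // The flag is not lost here: getISDForVPIntrinsic has already folded it
+    // into the opcode, picking VP_CTLZ/VP_CTTZ when it is false and the
+    // *_ZERO_UNDEF variants when it is true, so the SDNode does not need to
+    // carry it as an operand.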
+ OpValues.pop_back(); + SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues); + setValue(&VPIntrin, Result); + break; + } } } diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index ebe771605030c..e9ef26a138ad1 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -8382,6 +8382,33 @@ SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const { return DAG.getNode(ISD::CTPOP, dl, VT, Op); } +SDValue TargetLowering::expandVPCTLZ(SDNode *Node, SelectionDAG &DAG) const { + SDLoc dl(Node); + EVT VT = Node->getValueType(0); + EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); + SDValue Op = Node->getOperand(0); + SDValue Mask = Node->getOperand(1); + SDValue VL = Node->getOperand(2); + unsigned NumBitsPerElt = VT.getScalarSizeInBits(); + + // do this: + // x = x | (x >> 1); + // x = x | (x >> 2); + // ... + // x = x | (x >>16); + // x = x | (x >>32); // for 64-bit input + // return popcount(~x); + for (unsigned i = 0; (1U << i) < NumBitsPerElt; ++i) { + SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); + Op = DAG.getNode(ISD::VP_OR, dl, VT, Op, + DAG.getNode(ISD::VP_LSHR, dl, VT, Op, Tmp, Mask, VL), Mask, + VL); + } + Op = DAG.getNode(ISD::VP_XOR, dl, VT, Op, DAG.getConstant(-1, dl, VT), Mask, + VL); + return DAG.getNode(ISD::VP_CTPOP, dl, VT, Op, Mask, VL); +} + SDValue TargetLowering::CTTZTableLookup(SDNode *Node, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned BitWidth) const { @@ -8482,6 +8509,22 @@ SDValue TargetLowering::expandCTTZ(SDNode *Node, SelectionDAG &DAG) const { return DAG.getNode(ISD::CTPOP, dl, VT, Tmp); } +SDValue TargetLowering::expandVPCTTZ(SDNode *Node, SelectionDAG &DAG) const { + SDValue Op = Node->getOperand(0); + SDValue Mask = Node->getOperand(1); + SDValue VL = Node->getOperand(2); + SDLoc dl(Node); + EVT VT = Node->getValueType(0); + + // Same as the vector part of expandCTTZ, use: popcount(~x & (x - 1)) + SDValue Not = DAG.getNode(ISD::VP_XOR, dl, VT, Op, + DAG.getConstant(-1, dl, VT), Mask, VL); + SDValue MinusOne = DAG.getNode(ISD::VP_SUB, dl, VT, Op, + DAG.getConstant(1, dl, VT), Mask, VL); + SDValue Tmp = DAG.getNode(ISD::VP_AND, dl, VT, Not, MinusOne, Mask, VL); + return DAG.getNode(ISD::VP_CTPOP, dl, VT, Tmp, Mask, VL); +} + SDValue TargetLowering::expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative) const { SDLoc dl(N); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index cb92282014f91..634d7f6561795 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -611,7 +611,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::BSWAP, VT, Expand); setOperationAction({ISD::VP_BSWAP, ISD::VP_BITREVERSE}, VT, Expand); setOperationAction({ISD::VP_FSHL, ISD::VP_FSHR}, VT, Expand); - setOperationAction(ISD::VP_CTPOP, VT, Expand); + setOperationAction({ISD::VP_CTLZ, ISD::VP_CTLZ_ZERO_UNDEF, ISD::VP_CTTZ, + ISD::VP_CTTZ_ZERO_UNDEF, ISD::VP_CTPOP}, + VT, Expand); // Custom-lower extensions and truncations from/to mask types. 
setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index aca2f7263fdd5..39a2b740fa6f7 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -746,6 +746,82 @@ static const CostTblEntry VectorIntrinsicCostTable[]{ {Intrinsic::vp_ctpop, MVT::nxv2i64, 21}, {Intrinsic::vp_ctpop, MVT::nxv4i64, 21}, {Intrinsic::vp_ctpop, MVT::nxv8i64, 21}, + {Intrinsic::vp_ctlz, MVT::v2i8, 19}, + {Intrinsic::vp_ctlz, MVT::v4i8, 19}, + {Intrinsic::vp_ctlz, MVT::v8i8, 19}, + {Intrinsic::vp_ctlz, MVT::v16i8, 19}, + {Intrinsic::vp_ctlz, MVT::nxv1i8, 19}, + {Intrinsic::vp_ctlz, MVT::nxv2i8, 19}, + {Intrinsic::vp_ctlz, MVT::nxv4i8, 19}, + {Intrinsic::vp_ctlz, MVT::nxv8i8, 19}, + {Intrinsic::vp_ctlz, MVT::nxv16i8, 19}, + {Intrinsic::vp_ctlz, MVT::nxv32i8, 19}, + {Intrinsic::vp_ctlz, MVT::nxv64i8, 19}, + {Intrinsic::vp_ctlz, MVT::v2i16, 28}, + {Intrinsic::vp_ctlz, MVT::v4i16, 28}, + {Intrinsic::vp_ctlz, MVT::v8i16, 28}, + {Intrinsic::vp_ctlz, MVT::v16i16, 28}, + {Intrinsic::vp_ctlz, MVT::nxv1i16, 28}, + {Intrinsic::vp_ctlz, MVT::nxv2i16, 28}, + {Intrinsic::vp_ctlz, MVT::nxv4i16, 28}, + {Intrinsic::vp_ctlz, MVT::nxv8i16, 28}, + {Intrinsic::vp_ctlz, MVT::nxv16i16, 28}, + {Intrinsic::vp_ctlz, MVT::nxv32i16, 28}, + {Intrinsic::vp_ctlz, MVT::v2i32, 31}, + {Intrinsic::vp_ctlz, MVT::v4i32, 31}, + {Intrinsic::vp_ctlz, MVT::v8i32, 31}, + {Intrinsic::vp_ctlz, MVT::v16i32, 31}, + {Intrinsic::vp_ctlz, MVT::nxv1i32, 31}, + {Intrinsic::vp_ctlz, MVT::nxv2i32, 31}, + {Intrinsic::vp_ctlz, MVT::nxv4i32, 31}, + {Intrinsic::vp_ctlz, MVT::nxv8i32, 31}, + {Intrinsic::vp_ctlz, MVT::nxv16i32, 31}, + {Intrinsic::vp_ctlz, MVT::v2i64, 35}, + {Intrinsic::vp_ctlz, MVT::v4i64, 35}, + {Intrinsic::vp_ctlz, MVT::v8i64, 35}, + {Intrinsic::vp_ctlz, MVT::v16i64, 35}, + {Intrinsic::vp_ctlz, MVT::nxv1i64, 35}, + {Intrinsic::vp_ctlz, MVT::nxv2i64, 35}, + {Intrinsic::vp_ctlz, MVT::nxv4i64, 35}, + {Intrinsic::vp_ctlz, MVT::nxv8i64, 35}, + {Intrinsic::vp_cttz, MVT::v2i8, 16}, + {Intrinsic::vp_cttz, MVT::v4i8, 16}, + {Intrinsic::vp_cttz, MVT::v8i8, 16}, + {Intrinsic::vp_cttz, MVT::v16i8, 16}, + {Intrinsic::vp_cttz, MVT::nxv1i8, 16}, + {Intrinsic::vp_cttz, MVT::nxv2i8, 16}, + {Intrinsic::vp_cttz, MVT::nxv4i8, 16}, + {Intrinsic::vp_cttz, MVT::nxv8i8, 16}, + {Intrinsic::vp_cttz, MVT::nxv16i8, 16}, + {Intrinsic::vp_cttz, MVT::nxv32i8, 16}, + {Intrinsic::vp_cttz, MVT::nxv64i8, 16}, + {Intrinsic::vp_cttz, MVT::v2i16, 23}, + {Intrinsic::vp_cttz, MVT::v4i16, 23}, + {Intrinsic::vp_cttz, MVT::v8i16, 23}, + {Intrinsic::vp_cttz, MVT::v16i16, 23}, + {Intrinsic::vp_cttz, MVT::nxv1i16, 23}, + {Intrinsic::vp_cttz, MVT::nxv2i16, 23}, + {Intrinsic::vp_cttz, MVT::nxv4i16, 23}, + {Intrinsic::vp_cttz, MVT::nxv8i16, 23}, + {Intrinsic::vp_cttz, MVT::nxv16i16, 23}, + {Intrinsic::vp_cttz, MVT::nxv32i16, 23}, + {Intrinsic::vp_cttz, MVT::v2i32, 24}, + {Intrinsic::vp_cttz, MVT::v4i32, 24}, + {Intrinsic::vp_cttz, MVT::v8i32, 24}, + {Intrinsic::vp_cttz, MVT::v16i32, 24}, + {Intrinsic::vp_cttz, MVT::nxv1i32, 24}, + {Intrinsic::vp_cttz, MVT::nxv2i32, 24}, + {Intrinsic::vp_cttz, MVT::nxv4i32, 24}, + {Intrinsic::vp_cttz, MVT::nxv8i32, 24}, + {Intrinsic::vp_cttz, MVT::nxv16i32, 24}, + {Intrinsic::vp_cttz, MVT::v2i64, 25}, + {Intrinsic::vp_cttz, MVT::v4i64, 25}, + {Intrinsic::vp_cttz, MVT::v8i64, 25}, + {Intrinsic::vp_cttz, MVT::v16i64, 25}, + {Intrinsic::vp_cttz, MVT::nxv1i64, 25}, + 
{Intrinsic::vp_cttz, MVT::nxv2i64, 25}, + {Intrinsic::vp_cttz, MVT::nxv4i64, 25}, + {Intrinsic::vp_cttz, MVT::nxv8i64, 25}, }; static unsigned getISDForVPIntrinsicID(Intrinsic::ID ID) { diff --git a/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll b/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll index 15b8f097cf100..1d82c27145bed 100644 --- a/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll +++ b/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll @@ -382,6 +382,216 @@ define void @vp_ctpop() { ret void } +define void @vp_ctlz() { +; CHECK-LABEL: 'vp_ctlz' +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %1 = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %2 = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %3 = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %4 = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %5 = call @llvm.vp.ctlz.nxv1i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %6 = call @llvm.vp.ctlz.nxv2i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %7 = call @llvm.vp.ctlz.nxv4i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %8 = call @llvm.vp.ctlz.nxv8i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %9 = call @llvm.vp.ctlz.nxv16i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %10 = call @llvm.vp.ctlz.nxv32i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %11 = call @llvm.vp.ctlz.nxv64i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %12 = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %13 = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %14 = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %15 = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %16 = call @llvm.vp.ctlz.nxv1i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %17 = call @llvm.vp.ctlz.nxv2i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %18 = call @llvm.vp.ctlz.nxv4i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %19 = call @llvm.vp.ctlz.nxv8i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for 
instruction: %20 = call @llvm.vp.ctlz.nxv16i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %21 = call @llvm.vp.ctlz.nxv32i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %22 = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %23 = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %24 = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %25 = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %26 = call @llvm.vp.ctlz.nxv1i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %27 = call @llvm.vp.ctlz.nxv2i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %28 = call @llvm.vp.ctlz.nxv4i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %29 = call @llvm.vp.ctlz.nxv8i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %30 = call @llvm.vp.ctlz.nxv16i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %31 = call @llvm.vp.ctlz.nxv32i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %32 = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %33 = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %34 = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %35 = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %36 = call @llvm.vp.ctlz.nxv1i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %37 = call @llvm.vp.ctlz.nxv2i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %38 = call @llvm.vp.ctlz.nxv4i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %39 = call @llvm.vp.ctlz.nxv8i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %40 = call @llvm.vp.ctlz.nxv16i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %41 = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %42 = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 
35 for instruction: %43 = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %44 = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %45 = call @llvm.vp.ctlz.nxv1i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %46 = call @llvm.vp.ctlz.nxv2i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %47 = call @llvm.vp.ctlz.nxv4i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %48 = call @llvm.vp.ctlz.nxv8i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 70 for instruction: %49 = call @llvm.vp.ctlz.nxv16i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void +; + call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx1i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx2i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx4i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx8i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx16i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx32i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx64i8( undef, undef, i32 undef, i1 false) + call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx1i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx2i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx4i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx8i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx16i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx32i16( undef, undef, i32 undef, i1 false) + call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx1i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx2i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx4i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx8i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx16i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx32i16( undef, undef, i32 undef, i1 false) + call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> 
undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx1i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx2i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx4i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx8i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx16i32( undef, undef, i32 undef, i1 false) + call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx1i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx2i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx4i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx8i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.ctlz.nvx16i64( undef, undef, i32 undef, i1 false) + ret void +} + +define void @vp_cttz() { +; CHECK-LABEL: 'vp_cttz' +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %1 = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %2 = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %3 = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %5 = call @llvm.vp.cttz.nxv1i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %6 = call @llvm.vp.cttz.nxv2i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %7 = call @llvm.vp.cttz.nxv4i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %8 = call @llvm.vp.cttz.nxv8i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %9 = call @llvm.vp.cttz.nxv16i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %10 = call @llvm.vp.cttz.nxv32i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %11 = call @llvm.vp.cttz.nxv64i8( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %12 = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %13 = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %14 = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: 
%15 = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %16 = call @llvm.vp.cttz.nxv1i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %17 = call @llvm.vp.cttz.nxv2i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %18 = call @llvm.vp.cttz.nxv4i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %19 = call @llvm.vp.cttz.nxv8i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %20 = call @llvm.vp.cttz.nxv16i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %21 = call @llvm.vp.cttz.nxv32i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %22 = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %23 = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %24 = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %25 = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %26 = call @llvm.vp.cttz.nxv1i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %27 = call @llvm.vp.cttz.nxv2i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %28 = call @llvm.vp.cttz.nxv4i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %29 = call @llvm.vp.cttz.nxv8i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %30 = call @llvm.vp.cttz.nxv16i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %31 = call @llvm.vp.cttz.nxv32i16( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %32 = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %33 = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %34 = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %35 = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %36 = call @llvm.vp.cttz.nxv1i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %37 = call @llvm.vp.cttz.nxv2i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %38 = call 
@llvm.vp.cttz.nxv4i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %39 = call @llvm.vp.cttz.nxv8i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %40 = call @llvm.vp.cttz.nxv16i32( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %41 = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> undef, <2 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %42 = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> undef, <4 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %43 = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> undef, <8 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %44 = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> undef, <16 x i1> undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %45 = call @llvm.vp.cttz.nxv1i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %46 = call @llvm.vp.cttz.nxv2i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %47 = call @llvm.vp.cttz.nxv4i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 25 for instruction: %48 = call @llvm.vp.cttz.nxv8i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 50 for instruction: %49 = call @llvm.vp.cttz.nxv16i64( undef, undef, i32 undef, i1 false) +; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void +; + call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx1i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx2i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx4i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx8i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx16i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx32i8( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx64i8( undef, undef, i32 undef, i1 false) + call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx1i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx2i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx4i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx8i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx16i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx32i16( undef, undef, i32 undef, i1 false) + call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> undef, <4 x i1> undef, 
i32 undef, i1 false) + call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx1i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx2i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx4i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx8i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx16i16( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx32i16( undef, undef, i32 undef, i1 false) + call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx1i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx2i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx4i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx8i32( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx16i32( undef, undef, i32 undef, i1 false) + call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> undef, <2 x i1> undef, i32 undef, i1 false) + call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> undef, <4 x i1> undef, i32 undef, i1 false) + call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> undef, <8 x i1> undef, i32 undef, i1 false) + call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> undef, <16 x i1> undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx1i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx2i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx4i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx8i64( undef, undef, i32 undef, i1 false) + call @llvm.vp.cttz.nvx16i64( undef, undef, i32 undef, i1 false) + ret void +} + declare i16 @llvm.bswap.i16(i16) declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>) declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>) @@ -559,3 +769,83 @@ declare @llvm.vp.ctpop.nvx2i64(, @llvm.vp.ctpop.nvx4i64(, , i32) declare @llvm.vp.ctpop.nvx8i64(, , i32) declare @llvm.vp.ctpop.nvx16i64(, , i32) + +declare <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8>, <2 x i1>, i32, i1 immarg) +declare <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8>, <4 x i1>, i32, i1 immarg) +declare <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8>, <8 x i1>, i32, i1 immarg) +declare <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.ctlz.nvx1i8(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx2i8(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx4i8(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx8i8(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx16i8(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx32i8(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx64i8(, , i32, i1 immarg) +declare <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16>, <2 x i1>, i32, i1 immarg) +declare <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16>, <4 x i1>, i32, i1 immarg) +declare <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16>, <8 x i1>, i32, i1 immarg) +declare <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.ctlz.nvx1i16(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx2i16(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx4i16(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx8i16(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx16i16(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx32i16(, , i32, 
i1 immarg) +declare <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32>, <2 x i1>, i32, i1 immarg) +declare <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32>, <4 x i1>, i32, i1 immarg) +declare <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32>, <8 x i1>, i32, i1 immarg) +declare <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.ctlz.nvx1i32(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx2i32(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx4i32(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx8i32(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx16i32(, , i32, i1 immarg) +declare <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64>, <2 x i1>, i32, i1 immarg) +declare <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64>, <4 x i1>, i32, i1 immarg) +declare <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64>, <8 x i1>, i32, i1 immarg) +declare <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.ctlz.nvx1i64(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx2i64(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx4i64(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx8i64(, , i32, i1 immarg) +declare @llvm.vp.ctlz.nvx16i64(, , i32, i1 immarg) + +declare <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8>, <2 x i1>, i32, i1 immarg) +declare <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8>, <4 x i1>, i32, i1 immarg) +declare <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8>, <8 x i1>, i32, i1 immarg) +declare <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.cttz.nvx1i8(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx2i8(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx4i8(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx8i8(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx16i8(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx32i8(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx64i8(, , i32, i1 immarg) +declare <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16>, <2 x i1>, i32, i1 immarg) +declare <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16>, <4 x i1>, i32, i1 immarg) +declare <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16>, <8 x i1>, i32, i1 immarg) +declare <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.cttz.nvx1i16(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx2i16(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx4i16(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx8i16(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx16i16(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx32i16(, , i32, i1 immarg) +declare <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32>, <2 x i1>, i32, i1 immarg) +declare <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32>, <4 x i1>, i32, i1 immarg) +declare <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32>, <8 x i1>, i32, i1 immarg) +declare <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.cttz.nvx1i32(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx2i32(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx4i32(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx8i32(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx16i32(, , i32, i1 immarg) +declare <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64>, <2 x i1>, i32, i1 immarg) +declare <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64>, <4 x i1>, i32, i1 immarg) +declare <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64>, <8 x i1>, i32, i1 immarg) +declare <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64>, <16 x i1>, i32, i1 immarg) +declare @llvm.vp.cttz.nvx1i64(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx2i64(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx4i64(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx8i64(, , i32, i1 immarg) +declare @llvm.vp.cttz.nvx16i64(, , i32, i1 immarg) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll new file mode 100644 index 0000000000000..c42ea18d2bd84 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll @@ -0,0 +1,7550 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.vp.ctlz.nxv1i8(, , i32, i1 immarg) + +define @vp_ctlz_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv1i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv1i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv2i8(, , i32, i1) + +define @vp_ctlz_nxv2i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, 
a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv2i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv4i8(, , i32, i1) + +define @vp_ctlz_nxv4i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv4i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i8( %va, %m, i32 
%evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv8i8(, , i32, i1) + +define @vp_ctlz_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv8i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv16i8(, , i32, i1) + +define @vp_ctlz_nxv16i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv16i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; 
CHECK-NEXT: vsrl.vi v10, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv32i8(, , i32, i1) + +define @vp_ctlz_nxv32i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv32i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv32i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv32i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv32i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv64i8(, , i32, i1) + +define @vp_ctlz_nxv64i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 1, 
v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv64i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv64i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_nxv64i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv64i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv1i16(, , i32, i1) + +define @vp_ctlz_nxv1i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: 
addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv1i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv1i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv1i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv2i16(, , i32, i1) + +define @vp_ctlz_nxv2i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, 
v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; 
RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv4i16(, , i32, i1) + +define @vp_ctlz_nxv4i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv4i16_unmasked( 
%va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv8i16(, , i32, i1) + +define @vp_ctlz_nxv8i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, 
v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv8i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; 
RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv16i16(, , i32, i1) + +define @vp_ctlz_nxv16i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv16i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; 
RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv32i16(, , i32, i1) + +define @vp_ctlz_nxv32i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t 
+; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv32i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv32i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv32i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv32i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 
false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv32i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv1i32(, , i32, i1) + +define @vp_ctlz_nxv1i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv1i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv1i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 
1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv1i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv2i32(, , i32, i1) + +define @vp_ctlz_nxv2i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, 
e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, 
v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv4i32(, , i32, i1) + +define @vp_ctlz_nxv4i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv4i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv 
v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv8i32(, , i32, i1) + +define @vp_ctlz_nxv8i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, 
v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv8i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, 
v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv16i32(, , i32, i1) + +define @vp_ctlz_nxv16i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: 
vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv16i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv1i64(, , i32, i1) + +define @vp_ctlz_nxv1i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: 
addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI36_0) +; RV64-NEXT: ld a0, %lo(.LCPI36_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI36_1) +; RV64-NEXT: ld a1, %lo(.LCPI36_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI36_2) +; RV64-NEXT: ld a0, %lo(.LCPI36_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI36_3) +; RV64-NEXT: ld a1, %lo(.LCPI36_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv1i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: 
vp_ctlz_nxv1i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv1i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI37_0) +; RV64-NEXT: ld a0, %lo(.LCPI37_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI37_1) +; RV64-NEXT: ld a1, %lo(.LCPI37_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI37_2) +; RV64-NEXT: ld a0, %lo(.LCPI37_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI37_3) +; 
RV64-NEXT: ld a1, %lo(.LCPI37_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv2i64(, , i32, i1) + +define @vp_ctlz_nxv2i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI38_0) +; RV64-NEXT: ld a0, %lo(.LCPI38_0)(a0) +; RV64-NEXT: lui a1, 
%hi(.LCPI38_1) +; RV64-NEXT: ld a1, %lo(.LCPI38_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI38_2) +; RV64-NEXT: ld a0, %lo(.LCPI38_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI38_3) +; RV64-NEXT: ld a1, %lo(.LCPI38_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t 
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI39_0) +; RV64-NEXT: ld a0, %lo(.LCPI39_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI39_1) +; RV64-NEXT: ld a1, %lo(.LCPI39_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI39_2) +; RV64-NEXT: ld a0, %lo(.LCPI39_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI39_3) +; RV64-NEXT: ld a1, %lo(.LCPI39_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv4i64(, , i32, i1) + +define @vp_ctlz_nxv4i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; 
RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI40_0) +; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI40_1) +; RV64-NEXT: ld a1, %lo(.LCPI40_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI40_2) +; RV64-NEXT: ld a0, %lo(.LCPI40_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI40_3) +; RV64-NEXT: ld a1, %lo(.LCPI40_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv4i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, 
v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI41_0) +; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI41_1) +; RV64-NEXT: ld a1, %lo(.LCPI41_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI41_2) +; RV64-NEXT: ld a0, %lo(.LCPI41_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI41_3) +; RV64-NEXT: ld a1, %lo(.LCPI41_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv7i64(, , i32, i1) + +define @vp_ctlz_nxv7i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv7i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; 
RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv7i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI42_0) +; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI42_1) +; RV64-NEXT: ld a1, %lo(.LCPI42_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI42_2) +; RV64-NEXT: ld a0, %lo(.LCPI42_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI42_3) +; RV64-NEXT: ld a1, %lo(.LCPI42_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv7i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv7i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv7i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, 
v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv7i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI43_0) +; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI43_1) +; RV64-NEXT: ld a1, %lo(.LCPI43_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI43_2) +; RV64-NEXT: ld a0, %lo(.LCPI43_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI43_3) +; RV64-NEXT: ld a1, %lo(.LCPI43_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv7i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv8i64(, , i32, i1) + +define @vp_ctlz_nxv8i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; 
RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI44_0) +; RV64-NEXT: ld a0, %lo(.LCPI44_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI44_1) +; RV64-NEXT: ld a1, %lo(.LCPI44_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI44_2) +; RV64-NEXT: ld a0, %lo(.LCPI44_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI44_3) +; RV64-NEXT: ld a1, %lo(.LCPI44_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i64( %va, %m, i32 %evl, i1 false) + 
ret %v +} + +define @vp_ctlz_nxv8i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI45_0) +; RV64-NEXT: ld a0, %lo(.LCPI45_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI45_1) +; RV64-NEXT: ld a1, %lo(.LCPI45_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, 
v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI45_2) +; RV64-NEXT: ld a0, %lo(.LCPI45_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI45_3) +; RV64-NEXT: ld a1, %lo(.LCPI45_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.ctlz.nxv16i64(, , i32, i1) + +define @vp_ctlz_nxv16i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv16i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: vmv1r.v v1, v0 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: srli a2, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a2 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 2, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 8, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 16, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: li a2, 32 +; RV32-NEXT: vsrl.vx v24, v16, a2, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vnot.v v16, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli 
a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v24, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v16, v24, v16, v0.t +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v16, a3, v0.t +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB46_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB46_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; 
RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv16i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: vmv1r.v v24, v0 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: srli a2, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a2 +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vor.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a2, 32 +; RV64-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v16, v8, v0.t +; RV64-NEXT: lui a3, %hi(.LCPI46_0) +; RV64-NEXT: ld a4, %lo(.LCPI46_0)(a3) +; RV64-NEXT: lui a3, %hi(.LCPI46_1) +; RV64-NEXT: ld a3, %lo(.LCPI46_1)(a3) +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vsub.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v16, v16, v8, v0.t +; RV64-NEXT: lui a5, %hi(.LCPI46_2) +; RV64-NEXT: ld a5, %lo(.LCPI46_2)(a5) +; RV64-NEXT: lui a6, %hi(.LCPI46_3) +; RV64-NEXT: ld a6, %lo(.LCPI46_3)(a6) +; RV64-NEXT: vsrl.vi v8, v16, 4, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; 
RV64-NEXT: li a7, 56 +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi t0, sp, 16 +; RV64-NEXT: vs8r.v v8, (t0) # Unknown-size Folded Spill +; RV64-NEXT: bltu a0, a1, .LBB46_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB46_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vor.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a4, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_nxv16i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_nxv16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 2, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 8, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 16, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: li a2, 32 +; RV32-NEXT: vsrl.vx v24, v16, a2, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vnot.v v16, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, 
a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v16, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v24, v24, v16, v0.t +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v24, a3, v0.t +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB47_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB47_2: +; 
RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vx v24, v8, a2, v0.t +; RV32-NEXT: vor.vv v8, v8, v24, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v24, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v24, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_nxv16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 2, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 8, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 16, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: li a2, 32 +; RV64-NEXT: vsrl.vx v24, v16, a2, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: lui a3, %hi(.LCPI47_0) +; RV64-NEXT: ld a4, %lo(.LCPI47_0)(a3) +; RV64-NEXT: lui a3, %hi(.LCPI47_1) +; RV64-NEXT: ld a3, %lo(.LCPI47_1)(a3) +; RV64-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV64-NEXT: vand.vx v24, v24, a4, v0.t +; RV64-NEXT: vsub.vv v16, v16, v24, v0.t +; RV64-NEXT: vand.vx v24, v16, a3, v0.t +; RV64-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV64-NEXT: vand.vx v16, v16, a3, v0.t +; RV64-NEXT: vadd.vv v16, v24, v16, v0.t +; RV64-NEXT: lui a5, %hi(.LCPI47_2) +; RV64-NEXT: ld a5, %lo(.LCPI47_2)(a5) +; RV64-NEXT: lui a6, %hi(.LCPI47_3) +; RV64-NEXT: ld a6, %lo(.LCPI47_3)(a6) +; RV64-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV64-NEXT: vadd.vv v16, v16, 
v24, v0.t +; RV64-NEXT: vand.vx v16, v16, a5, v0.t +; RV64-NEXT: vmul.vx v16, v16, a6, v0.t +; RV64-NEXT: li a7, 56 +; RV64-NEXT: vsrl.vx v16, v16, a7, v0.t +; RV64-NEXT: bltu a0, a1, .LBB47_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB47_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vx v24, v8, a2, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV64-NEXT: vand.vx v24, v24, a4, v0.t +; RV64-NEXT: vsub.vv v8, v8, v24, v0.t +; RV64-NEXT: vand.vx v24, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v24, v8, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v24, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_ctlz_zero_undef_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv1i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv1i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv2i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vp_ctlz_zero_undef_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv2i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv4i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv4i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, 
v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv8i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv16i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v10, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv16i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; 
CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v10 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv32i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v12, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv32i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv32i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv32i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v12 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv32i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv64i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v16, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; 
CHECK-NEXT: vadd.vv v8, v16, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctlz.nxv64i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv64i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_nxv64i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v16 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv64i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv1i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi 
v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv1i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv1i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv1i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv2i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; 
RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv4i16( %va, %m, i32 zeroext %evl) { +; 
RV32-LABEL: vp_ctlz_zero_undef_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv4i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; 
RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv8i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv8i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: 
vp_ctlz_zero_undef_nxv8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv16i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, 
v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv16i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv32i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, 
ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv32i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv32i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv32i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv32i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: 
vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv32i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv1i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t 
+; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv1i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv1i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv1i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv2i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv 
v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 
819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv4i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv4i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; 
RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv8i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: 
vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv8i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, 
a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv16i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv16i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv 
v8, v8, v16 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv1i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, 
e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI84_0) +; RV64-NEXT: ld a0, %lo(.LCPI84_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI84_1) +; RV64-NEXT: ld a1, %lo(.LCPI84_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI84_2) +; RV64-NEXT: ld a0, %lo(.LCPI84_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI84_3) +; RV64-NEXT: ld a1, %lo(.LCPI84_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv1i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv1i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv1i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, 
zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv1i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI85_0) +; RV64-NEXT: ld a0, %lo(.LCPI85_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI85_1) +; RV64-NEXT: ld a1, %lo(.LCPI85_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: vand.vx v9, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: lui a0, %hi(.LCPI85_2) +; RV64-NEXT: ld a0, %lo(.LCPI85_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI85_3) +; RV64-NEXT: ld a1, %lo(.LCPI85_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv1i64( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv2i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: li a1, 32 +; 
RV32-NEXT: vsrl.vx v10, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI86_0) +; RV64-NEXT: ld a0, %lo(.LCPI86_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI86_1) +; RV64-NEXT: ld a1, %lo(.LCPI86_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI86_2) +; RV64-NEXT: ld a0, %lo(.LCPI86_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI86_3) +; RV64-NEXT: ld a1, %lo(.LCPI86_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv2i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; 
RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v10, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI87_0) +; RV64-NEXT: ld a0, %lo(.LCPI87_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI87_1) +; RV64-NEXT: ld a1, %lo(.LCPI87_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: vand.vx v10, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: lui a0, %hi(.LCPI87_2) +; RV64-NEXT: ld a0, %lo(.LCPI87_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI87_3) +; RV64-NEXT: ld a1, %lo(.LCPI87_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv2i64( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv4i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) 
+; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI88_0) +; RV64-NEXT: ld a0, %lo(.LCPI88_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI88_1) +; RV64-NEXT: ld a1, %lo(.LCPI88_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI88_2) +; RV64-NEXT: ld a0, %lo(.LCPI88_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI88_3) +; RV64-NEXT: ld a1, %lo(.LCPI88_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv4i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv4i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 
12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI89_0) +; RV64-NEXT: ld a0, %lo(.LCPI89_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI89_1) +; RV64-NEXT: ld a1, %lo(.LCPI89_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: vand.vx v12, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: lui a0, %hi(.LCPI89_2) +; RV64-NEXT: ld a0, %lo(.LCPI89_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI89_3) +; RV64-NEXT: ld a1, %lo(.LCPI89_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv4i64( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv7i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv7i64: +; RV32: # %bb.0: +; 
RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv7i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI90_0) +; RV64-NEXT: ld a0, %lo(.LCPI90_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI90_1) +; RV64-NEXT: ld a1, %lo(.LCPI90_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI90_2) +; RV64-NEXT: ld a0, %lo(.LCPI90_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI90_3) +; RV64-NEXT: ld a1, %lo(.LCPI90_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, 
a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv7i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv7i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv7i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv7i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI91_0) +; RV64-NEXT: ld a0, %lo(.LCPI91_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI91_1) +; RV64-NEXT: ld a1, %lo(.LCPI91_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI91_2) +; RV64-NEXT: ld a0, %lo(.LCPI91_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI91_3) +; RV64-NEXT: ld a1, %lo(.LCPI91_3)(a1) +; RV64-NEXT: vsrl.vi 
v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv7i64( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_ctlz_zero_undef_nxv8i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI92_0) +; RV64-NEXT: ld a0, %lo(.LCPI92_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI92_1) +; RV64-NEXT: ld a1, %lo(.LCPI92_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; 
RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI92_2) +; RV64-NEXT: ld a0, %lo(.LCPI92_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI92_3) +; RV64-NEXT: ld a1, %lo(.LCPI92_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv8i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv8i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI93_0) +; RV64-NEXT: ld a0, 
%lo(.LCPI93_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI93_1) +; RV64-NEXT: ld a1, %lo(.LCPI93_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI93_2) +; RV64-NEXT: ld a0, %lo(.LCPI93_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI93_3) +; RV64-NEXT: ld a1, %lo(.LCPI93_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv8i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv16i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv16i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: vmv1r.v v1, v0 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: srli a2, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a2 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 2, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 8, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 16, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: li a2, 32 +; RV32-NEXT: vsrl.vx v24, v16, a2, v0.t +; RV32-NEXT: vor.vv v16, v16, v24, v0.t +; RV32-NEXT: vnot.v v16, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, 
a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v24, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v16, v24, v16, v0.t +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v16, a3, v0.t +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB94_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB94_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: 
vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv16i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: vmv1r.v v24, v0 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: srli a2, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a2 +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vor.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a2, 32 +; RV64-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v16, v8, v0.t +; RV64-NEXT: lui a3, %hi(.LCPI94_0) +; RV64-NEXT: ld a4, %lo(.LCPI94_0)(a3) +; RV64-NEXT: lui a3, %hi(.LCPI94_1) +; RV64-NEXT: ld a3, %lo(.LCPI94_1)(a3) +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vsub.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v16, v16, v8, v0.t +; RV64-NEXT: lui a5, %hi(.LCPI94_2) +; 
RV64-NEXT: ld a5, %lo(.LCPI94_2)(a5) +; RV64-NEXT: lui a6, %hi(.LCPI94_3) +; RV64-NEXT: ld a6, %lo(.LCPI94_3)(a6) +; RV64-NEXT: vsrl.vi v8, v16, 4, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: li a7, 56 +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi t0, sp, 16 +; RV64-NEXT: vs8r.v v8, (t0) # Unknown-size Folded Spill +; RV64-NEXT: bltu a0, a1, .LBB94_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB94_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vor.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a4, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call @llvm.vp.ctlz.nxv16i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_ctlz_zero_undef_nxv16i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_nxv16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 5 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v16, 1 +; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vi v24, v16, 2 +; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vi v24, v16, 4 +; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vi v24, v16, 8 +; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vsrl.vi v24, v16, 16 +; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: li a2, 32 +; RV32-NEXT: vsrl.vx v24, 
v16, a2 +; RV32-NEXT: vor.vv v16, v16, v24 +; RV32-NEXT: vnot.v v16, v16 +; RV32-NEXT: vsrl.vi v24, v16, 1 +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v0, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v0, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vsub.vv v16, v16, v24 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v0, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v16, v0 +; RV32-NEXT: vsrl.vi v16, v16, 2 +; RV32-NEXT: vand.vv v16, v16, v0 +; RV32-NEXT: vadd.vv v16, v24, v16 +; RV32-NEXT: vsrl.vi v24, v16, 4 +; RV32-NEXT: vadd.vv v16, v16, v24 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v16, v24 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v24, v24, v16 +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v24, a3 +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB95_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB95_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v24 +; RV32-NEXT: vsrl.vi v24, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v24 +; RV32-NEXT: vsrl.vi v24, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v24 +; RV32-NEXT: vsrl.vi v24, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v24 +; RV32-NEXT: vsrl.vi v24, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v24 +; RV32-NEXT: vsrl.vx v24, v8, a2 +; RV32-NEXT: vor.vv v8, v8, v24 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v24, v8, 1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v24, v16 +; RV32-NEXT: vsub.vv v8, v8, v24 +; RV32-NEXT: vand.vv v24, v8, v0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v0 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v24, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v24 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: vsrl.vx v8, v8, a3 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_nxv16i64_unmasked: +; 
RV64: # %bb.0: +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v16, 1 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 2 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 4 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 8 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 16 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: li a2, 32 +; RV64-NEXT: vsrl.vx v24, v16, a2 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vnot.v v16, v16 +; RV64-NEXT: lui a3, %hi(.LCPI95_0) +; RV64-NEXT: ld a3, %lo(.LCPI95_0)(a3) +; RV64-NEXT: lui a4, %hi(.LCPI95_1) +; RV64-NEXT: ld a4, %lo(.LCPI95_1)(a4) +; RV64-NEXT: vsrl.vi v24, v16, 1 +; RV64-NEXT: vand.vx v24, v24, a3 +; RV64-NEXT: vsub.vv v16, v16, v24 +; RV64-NEXT: vand.vx v24, v16, a4 +; RV64-NEXT: vsrl.vi v16, v16, 2 +; RV64-NEXT: vand.vx v16, v16, a4 +; RV64-NEXT: vadd.vv v16, v24, v16 +; RV64-NEXT: lui a5, %hi(.LCPI95_2) +; RV64-NEXT: ld a5, %lo(.LCPI95_2)(a5) +; RV64-NEXT: lui a6, %hi(.LCPI95_3) +; RV64-NEXT: ld a6, %lo(.LCPI95_3)(a6) +; RV64-NEXT: vsrl.vi v24, v16, 4 +; RV64-NEXT: vadd.vv v16, v16, v24 +; RV64-NEXT: vand.vx v16, v16, a5 +; RV64-NEXT: vmul.vx v16, v16, a6 +; RV64-NEXT: li a7, 56 +; RV64-NEXT: vsrl.vx v16, v16, a7 +; RV64-NEXT: bltu a0, a1, .LBB95_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB95_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vx v24, v8, a2 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v24, v8, 1 +; RV64-NEXT: vand.vx v24, v24, a3 +; RV64-NEXT: vsub.vv v8, v8, v24 +; RV64-NEXT: vand.vx v24, v8, a4 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a4 +; RV64-NEXT: vadd.vv v8, v24, v8 +; RV64-NEXT: vsrl.vi v24, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v24 +; RV64-NEXT: vand.vx v8, v8, a5 +; RV64-NEXT: vmul.vx v8, v8, a6 +; RV64-NEXT: vsrl.vx v8, v8, a7 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctlz.nxv16i64( %va, %m, i32 %evl, i1 true) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll new file mode 100644 index 0000000000000..2a5bb985d38ee --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll @@ -0,0 +1,6386 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.vp.cttz.nxv1i8(, , i32, i1 immarg) + +define @vp_cttz_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t 
+; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv1i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv1i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv1i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv1i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv2i8(, , i32, i1) + +define @vp_cttz_nxv2i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv2i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv2i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv2i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv4i8(, , i32, 
i1) + +define @vp_cttz_nxv4i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv4i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv4i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv8i8(, , i32, i1) + +define @vp_cttz_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv8i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: 
vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv16i8(, , i32, i1) + +define @vp_cttz_nxv16i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsub.vx v10, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv16i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv16i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsub.vx v10, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv16i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv32i8(, , i32, i1) + +define @vp_cttz_nxv32i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsub.vx v12, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv32i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv32i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv32i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsub.vx v12, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0, v0.t +; CHECK-NEXT: 
vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv32i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv64i8(, , i32, i1) + +define @vp_cttz_nxv64i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsub.vx v16, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv64i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv64i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_nxv64i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsub.vx v16, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv64i8( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv1i16(, , i32, i1) + +define @vp_cttz_nxv1i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; 
RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv1i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv1i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv1i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv1i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv1i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv2i16(, , i32, i1) + +define @vp_cttz_nxv2i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi 
a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv2i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: 
vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv2i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv4i16(, , i32, i1) + +define @vp_cttz_nxv4i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv4i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv4i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; 
RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv8i16(, , i32, i1) + +define @vp_cttz_nxv8i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv8i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; 
RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv16i16(, , i32, i1) + +define @vp_cttz_nxv16i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, 
v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv16i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv16i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv16i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv32i16(, , i32, i1) + +define @vp_cttz_nxv32i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v 
v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv32i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv32i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv32i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv32i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv32i16( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv1i32(, , i32, i1) + +define @vp_cttz_nxv1i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: 
vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv1i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv1i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv1i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv1i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, 
-241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv1i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv2i32(, , i32, i1) + +define @vp_cttz_nxv2i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv2i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; 
RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv2i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv4i32(, , i32, i1) + +define @vp_cttz_nxv4i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv4i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv4i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; 
RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv8i32(, , i32, i1) + +define @vp_cttz_nxv8i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; 
RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv8i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv16i32(, , i32, i1) + +define @vp_cttz_nxv16i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, 
v16, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv16i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv16i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 61681 +; 
RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv16i32( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv1i64(, , i32, i1) + +define @vp_cttz_nxv1i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI36_0) +; RV64-NEXT: ld a0, %lo(.LCPI36_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI36_1) +; RV64-NEXT: ld a1, %lo(.LCPI36_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI36_2) +; RV64-NEXT: ld a0, %lo(.LCPI36_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI36_3) +; RV64-NEXT: ld a1, %lo(.LCPI36_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv1i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv1i64_unmasked( %va, i32 zeroext 
%evl) { +; RV32-LABEL: vp_cttz_nxv1i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv1i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI37_0) +; RV64-NEXT: ld a0, %lo(.LCPI37_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI37_1) +; RV64-NEXT: ld a1, %lo(.LCPI37_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI37_2) +; RV64-NEXT: ld a0, %lo(.LCPI37_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI37_3) +; RV64-NEXT: ld a1, %lo(.LCPI37_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv1i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv2i64(, , i32, i1) + +define @vp_cttz_nxv2i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; 
RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI38_0) +; RV64-NEXT: ld a0, %lo(.LCPI38_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI38_1) +; RV64-NEXT: ld a1, %lo(.LCPI38_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI38_2) +; RV64-NEXT: ld a0, %lo(.LCPI38_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI38_3) +; RV64-NEXT: ld a1, %lo(.LCPI38_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv2i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 
8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI39_0) +; RV64-NEXT: ld a0, %lo(.LCPI39_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI39_1) +; RV64-NEXT: ld a1, %lo(.LCPI39_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI39_2) +; RV64-NEXT: ld a0, %lo(.LCPI39_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI39_3) +; RV64-NEXT: ld a1, %lo(.LCPI39_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv2i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv4i64(, , i32, i1) + +define @vp_cttz_nxv4i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: addi 
a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI40_0) +; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI40_1) +; RV64-NEXT: ld a1, %lo(.LCPI40_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI40_2) +; RV64-NEXT: ld a0, %lo(.LCPI40_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI40_3) +; RV64-NEXT: ld a1, %lo(.LCPI40_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv4i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv4i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, 
v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI41_0) +; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI41_1) +; RV64-NEXT: ld a1, %lo(.LCPI41_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI41_2) +; RV64-NEXT: ld a0, %lo(.LCPI41_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI41_3) +; RV64-NEXT: ld a1, %lo(.LCPI41_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv7i64(, , i32, i1) + +define @vp_cttz_nxv7i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv7i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, 
ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv7i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI42_0) +; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI42_1) +; RV64-NEXT: ld a1, %lo(.LCPI42_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI42_2) +; RV64-NEXT: ld a0, %lo(.LCPI42_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI42_3) +; RV64-NEXT: ld a1, %lo(.LCPI42_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv7i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv7i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv7i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, 
sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv7i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI43_0) +; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI43_1) +; RV64-NEXT: ld a1, %lo(.LCPI43_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI43_2) +; RV64-NEXT: ld a0, %lo(.LCPI43_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI43_3) +; RV64-NEXT: ld a1, %lo(.LCPI43_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv7i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv8i64(, , i32, i1) + +define @vp_cttz_nxv8i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, 
v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI44_0) +; RV64-NEXT: ld a0, %lo(.LCPI44_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI44_1) +; RV64-NEXT: ld a1, %lo(.LCPI44_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI44_2) +; RV64-NEXT: ld a0, %lo(.LCPI44_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI44_3) +; RV64-NEXT: ld a1, %lo(.LCPI44_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv8i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI45_0) +; RV64-NEXT: ld a0, %lo(.LCPI45_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI45_1) +; RV64-NEXT: ld a1, %lo(.LCPI45_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, 
v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI45_2) +; RV64-NEXT: ld a0, %lo(.LCPI45_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI45_3) +; RV64-NEXT: ld a1, %lo(.LCPI45_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement poison, i1 false, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +declare @llvm.vp.cttz.nxv16i64(, , i32, i1) + +define @vp_cttz_nxv16i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv16i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: vmv1r.v v1, v0 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: srli a2, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a2 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: li a2, 1 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v24, v16, a2, v0.t +; RV32-NEXT: vnot.v v16, v16, v0.t +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; 
RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v24, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v16, v24, v16, v0.t +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v16, a3, v0.t +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB46_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB46_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vsub.vx v16, v8, a2, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: 
csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv16i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: vmv1r.v v24, v0 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: srli a2, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a2 +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a3, a3, a2 +; RV64-NEXT: li a2, 1 +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v8, v16, a2, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vand.vv v16, v16, v8, v0.t +; RV64-NEXT: lui a3, %hi(.LCPI46_0) +; RV64-NEXT: ld a4, %lo(.LCPI46_0)(a3) +; RV64-NEXT: lui a3, %hi(.LCPI46_1) +; RV64-NEXT: ld a3, %lo(.LCPI46_1)(a3) +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vsub.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v16, v16, v8, v0.t +; RV64-NEXT: lui a5, %hi(.LCPI46_2) +; RV64-NEXT: ld a5, %lo(.LCPI46_2)(a5) +; RV64-NEXT: lui a6, %hi(.LCPI46_3) +; RV64-NEXT: ld a6, %lo(.LCPI46_3)(a6) +; RV64-NEXT: vsrl.vi v8, v16, 4, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: li a7, 56 +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi t0, sp, 16 +; RV64-NEXT: vs8r.v v8, (t0) # Unknown-size Folded Spill +; RV64-NEXT: bltu a0, a1, .LBB46_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB46_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsub.vx v16, v8, a2, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a4, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: 
vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv16i64( %va, %m, i32 %evl, i1 false) + ret %v +} + +define @vp_cttz_nxv16i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_nxv16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: li a2, 1 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v24, v16, a2, v0.t +; RV32-NEXT: vnot.v v16, v16, v0.t +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr 
a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v16, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v24, v24, v16, v0.t +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v24, a3, v0.t +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB47_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB47_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v24, v8, a2, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v24, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v24, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_nxv16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a1, zero, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, 
-1
+; RV64-NEXT: and a3, a3, a2
+; RV64-NEXT: li a2, 1
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV64-NEXT: vsub.vx v24, v16, a2, v0.t
+; RV64-NEXT: vnot.v v16, v16, v0.t
+; RV64-NEXT: vand.vv v16, v16, v24, v0.t
+; RV64-NEXT: lui a3, %hi(.LCPI47_0)
+; RV64-NEXT: ld a4, %lo(.LCPI47_0)(a3)
+; RV64-NEXT: lui a3, %hi(.LCPI47_1)
+; RV64-NEXT: ld a3, %lo(.LCPI47_1)(a3)
+; RV64-NEXT: vsrl.vi v24, v16, 1, v0.t
+; RV64-NEXT: vand.vx v24, v24, a4, v0.t
+; RV64-NEXT: vsub.vv v16, v16, v24, v0.t
+; RV64-NEXT: vand.vx v24, v16, a3, v0.t
+; RV64-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV64-NEXT: vand.vx v16, v16, a3, v0.t
+; RV64-NEXT: vadd.vv v16, v24, v16, v0.t
+; RV64-NEXT: lui a5, %hi(.LCPI47_2)
+; RV64-NEXT: ld a5, %lo(.LCPI47_2)(a5)
+; RV64-NEXT: lui a6, %hi(.LCPI47_3)
+; RV64-NEXT: ld a6, %lo(.LCPI47_3)(a6)
+; RV64-NEXT: vsrl.vi v24, v16, 4, v0.t
+; RV64-NEXT: vadd.vv v16, v16, v24, v0.t
+; RV64-NEXT: vand.vx v16, v16, a5, v0.t
+; RV64-NEXT: vmul.vx v16, v16, a6, v0.t
+; RV64-NEXT: li a7, 56
+; RV64-NEXT: vsrl.vx v16, v16, a7, v0.t
+; RV64-NEXT: bltu a0, a1, .LBB47_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a0, a1
+; RV64-NEXT: .LBB47_2:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vsub.vx v24, v8, a2, v0.t
+; RV64-NEXT: vnot.v v8, v8, v0.t
+; RV64-NEXT: vand.vv v8, v8, v24, v0.t
+; RV64-NEXT: vsrl.vi v24, v8, 1, v0.t
+; RV64-NEXT: vand.vx v24, v24, a4, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v24, v0.t
+; RV64-NEXT: vand.vx v24, v8, a3, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a3, v0.t
+; RV64-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV64-NEXT: vsrl.vi v24, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v24, v0.t
+; RV64-NEXT: vand.vx v8, v8, a5, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a6, v0.t
+; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t
+; RV64-NEXT: ret
+  %head = insertelement <vscale x 16 x i1> poison, i1 false, i32 0
+  %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+  %v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 %evl, i1 false)
+  ret <vscale x 16 x i64> %v
+}
+
+define <vscale x 1 x i8> @vp_cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_cttz_zero_undef_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t
+; CHECK-NEXT: vnot.v v8, v8, v0.t
+; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+  %v = call <vscale x 1 x i8> @llvm.vp.cttz.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl, i1 true)
+  ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vp_cttz_zero_undef_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_cttz_zero_undef_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsub.vx v9, v8, a1
+; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vand.vv v8, v8, v9
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x i8> @llvm.vp.cttz.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl, i1 true)
+  ret <vscale x 1 x i8> %v
+}
+
+
+define <vscale x 2 x i8> @vp_cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_cttz_zero_undef_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t
+; CHECK-NEXT: vnot.v v8, v8, v0.t
+; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+  %v = call <vscale x 2 x i8> @llvm.vp.cttz.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl, i1 true)
+  ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vp_cttz_zero_undef_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_cttz_zero_undef_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsub.vx v9, v8, a1
+; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vand.vv v8, v8, v9
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+  %v = call <vscale x 2 x i8> @llvm.vp.cttz.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl, i1 true)
+  ret <vscale x 2 x i8> %v
+}
+
+
+define <vscale x 4 x i8> @vp_cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_cttz_zero_undef_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t
+; CHECK-NEXT: vnot.v v8, v8, v0.t
+; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+  %v = call <vscale x 4 x i8> @llvm.vp.cttz.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl, i1 true)
+  ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vp_cttz_zero_undef_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_cttz_zero_undef_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsub.vx v9, v8, a1
+; CHECK-NEXT: vnot.v v8, v8
+; CHECK-NEXT: vand.vv v8, v8, v9
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+  %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
%m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv8i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv16i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsub.vx v10, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v10, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv16i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv16i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsub.vx v10, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v10 +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, 
poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv16i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv32i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsub.vx v12, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v12, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv32i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv32i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv32i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsub.vx v12, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v12 +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv32i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv64i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsub.vx v16, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.cttz.nxv64i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv64i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_nxv64i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsub.vx v16, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, 
poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv64i8( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv1i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv1i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv1i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv1i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv1i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; 
RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv1i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv2i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv2i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; 
RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv2i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv4i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv4i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv4i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui 
a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv8i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv8i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; 
RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv16i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call 
@llvm.vp.cttz.nxv16i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv16i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv16i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv32i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: 
vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv32i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv32i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv32i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv32i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv32i16( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv1i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; 
RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv1i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv1i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv1i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv1i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv1i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv2i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: 
vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv2i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv2i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define 
@vp_cttz_zero_undef_nxv4i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv4i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv4i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, 
v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv8i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv8i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: 
addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv16i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv16i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv16i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1 +; RV32-NEXT: 
vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv16i32( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv1i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli 
zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vmul.vv v8, v8, v9, v0.t
+; RV32-NEXT:    li a0, 56
+; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_cttz_zero_undef_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 1
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vsub.vx v9, v8, a1, v0.t
+; RV64-NEXT:    vnot.v v8, v8, v0.t
+; RV64-NEXT:    vand.vv v8, v8, v9, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI84_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI84_0)(a0)
+; RV64-NEXT:    lui a1, %hi(.LCPI84_1)
+; RV64-NEXT:    ld a1, %lo(.LCPI84_1)(a1)
+; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
+; RV64-NEXT:    vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vand.vx v9, v8, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a1, v0.t
+; RV64-NEXT:    vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI84_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI84_2)(a0)
+; RV64-NEXT:    lui a1, %hi(.LCPI84_3)
+; RV64-NEXT:    ld a1, %lo(.LCPI84_3)(a1)
+; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT:    vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
+; RV64-NEXT:    vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT:    li a0, 56
+; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT:    ret
+  %v = call <vscale x 1 x i64> @llvm.vp.cttz.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl, i1 true)
+  ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vp_cttz_zero_undef_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_cttz_zero_undef_nxv1i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 4112
+; RV32-NEXT:    addi a1, a1, 257
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vsub.vx v9, v8, a1
+; RV32-NEXT:    vnot.v v8, v8
+; RV32-NEXT:    vand.vv v8, v8, v9
+; RV32-NEXT:    vsrl.vi v9, v8, 1
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v9, v9, v10
+; RV32-NEXT:    vsub.vv v8, v8, v9
+; RV32-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v10, v8, v9
+; RV32-NEXT:    vsrl.vi v8, v8, 2
+; RV32-NEXT:    vand.vv v8, v8, v9
+; RV32-NEXT:    vadd.vv v8, v10, v8
+; RV32-NEXT:    vsrl.vi v9, v8, 4
+; RV32-NEXT:    vadd.vv v8, v8, v9
+; RV32-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vand.vv v8, v8, v9
+; RV32-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    vmul.vv v8, v8, v9
+; RV32-NEXT:    li a0, 56
+; RV32-NEXT:    vsrl.vx v8, v8, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_cttz_zero_undef_nxv1i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 1
+; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vsub.vx v9, v8, a1
+; RV64-NEXT:    vnot.v v8, v8
+; RV64-NEXT:    vand.vv v8, v8, v9
+; RV64-NEXT:    lui a0, %hi(.LCPI85_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI85_0)(a0)
+; RV64-NEXT:    lui a1, %hi(.LCPI85_1)
+; RV64-NEXT:    ld a1, %lo(.LCPI85_1)(a1)
+; RV64-NEXT:    vsrl.vi v9, v8, 1
+; RV64-NEXT:    vand.vx v9, v9, a0
+; RV64-NEXT:    vsub.vv v8, v8, v9
+; RV64-NEXT:    vand.vx v9, v8, a1
+; RV64-NEXT:    vsrl.vi v8, v8, 2
+; RV64-NEXT:    vand.vx v8, v8, a1
+; RV64-NEXT:    vadd.vv v8, v9, v8
+; RV64-NEXT:    lui a0, %hi(.LCPI85_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI85_2)(a0)
+; RV64-NEXT:    lui a1, %hi(.LCPI85_3)
+; RV64-NEXT:    ld a1, %lo(.LCPI85_3)(a1)
+; RV64-NEXT:    vsrl.vi v9, v8, 4
+; RV64-NEXT:    vadd.vv v8, v8, v9
+; RV64-NEXT:    vand.vx v8, v8, a0
+; RV64-NEXT:    vmul.vx v8, v8, a1
+; RV64-NEXT:    li a0, 56
+; RV64-NEXT:    vsrl.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+  %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+  %v = call <vscale x 1 x i64> @llvm.vp.cttz.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl, i1 true)
+  ret <vscale x 1 x i64> %v
+}
+
+
+define <vscale x 2 x i64> @vp_cttz_zero_undef_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_cttz_zero_undef_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a1, 349525
+; RV32-NEXT:    addi a1, a1, 1365
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 209715
+; RV32-NEXT:    addi a1, a1, 819
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 61681
+; RV32-NEXT:    addi a1, a1, -241
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    lui a1, 4112
+; RV32-NEXT:    addi a1, a1, 257
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a1, 8(sp)
+; RV32-NEXT:    li a1, 1
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vsub.vx v10, v8, a1, v0.t
+; RV32-NEXT:    vnot.v v8, v8, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v12, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v10, v10, v12, v0.t
+; RV32-NEXT:    vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v12, v8, v10, v0.t
+; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT:    vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a1), zero
+; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    vmul.vv v8, v8, v10, v0.t
+; RV32-NEXT:    li a0, 56
+; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_cttz_zero_undef_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a1, 1
+; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT:    vsub.vx v10, v8, a1, v0.t
+; RV64-NEXT:    vnot.v v8, v8, v0.t
+; RV64-NEXT:    vand.vv v8, v8, v10, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI86_0)
+; RV64-NEXT:    ld a0, %lo(.LCPI86_0)(a0)
+; RV64-NEXT:    lui a1, %hi(.LCPI86_1)
+; RV64-NEXT:    ld a1, %lo(.LCPI86_1)(a1)
+; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
+; RV64-NEXT:    vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT:    vand.vx v10, v8, a1, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a1, v0.t
+; RV64-NEXT:    vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT:    lui a0, %hi(.LCPI86_2)
+; RV64-NEXT:    ld a0, %lo(.LCPI86_2)(a0)
+; RV64-NEXT:    lui a1, %hi(.LCPI86_3)
+; RV64-NEXT:    ld
a1, %lo(.LCPI86_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv2i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: lui a0, %hi(.LCPI87_0) +; RV64-NEXT: ld a0, %lo(.LCPI87_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI87_1) +; RV64-NEXT: ld a1, %lo(.LCPI87_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: vand.vx v10, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: lui a0, %hi(.LCPI87_2) +; RV64-NEXT: ld a0, %lo(.LCPI87_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI87_3) +; RV64-NEXT: ld a1, %lo(.LCPI87_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv2i64( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv4i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; 
RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI88_0) +; RV64-NEXT: ld a0, %lo(.LCPI88_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI88_1) +; RV64-NEXT: ld a1, %lo(.LCPI88_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI88_2) +; RV64-NEXT: ld a0, %lo(.LCPI88_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI88_3) +; RV64-NEXT: ld a1, %lo(.LCPI88_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv4i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv4i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 
257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: lui a0, %hi(.LCPI89_0) +; RV64-NEXT: ld a0, %lo(.LCPI89_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI89_1) +; RV64-NEXT: ld a1, %lo(.LCPI89_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: vand.vx v12, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: lui a0, %hi(.LCPI89_2) +; RV64-NEXT: ld a0, %lo(.LCPI89_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI89_3) +; RV64-NEXT: ld a1, %lo(.LCPI89_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv4i64( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv7i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv7i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, 
v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv7i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI90_0) +; RV64-NEXT: ld a0, %lo(.LCPI90_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI90_1) +; RV64-NEXT: ld a1, %lo(.LCPI90_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI90_2) +; RV64-NEXT: ld a0, %lo(.LCPI90_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI90_3) +; RV64-NEXT: ld a1, %lo(.LCPI90_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv7i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv7i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv7i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, 
e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv7i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: lui a0, %hi(.LCPI91_0) +; RV64-NEXT: ld a0, %lo(.LCPI91_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI91_1) +; RV64-NEXT: ld a1, %lo(.LCPI91_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI91_2) +; RV64-NEXT: ld a0, %lo(.LCPI91_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI91_3) +; RV64-NEXT: ld a1, %lo(.LCPI91_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv7i64( %va, %m, i32 %evl, i1 true) + ret %v +} + + +define @vp_cttz_zero_undef_nxv8i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: 
vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI92_0) +; RV64-NEXT: ld a0, %lo(.LCPI92_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI92_1) +; RV64-NEXT: ld a1, %lo(.LCPI92_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI92_2) +; RV64-NEXT: ld a0, %lo(.LCPI92_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI92_3) +; RV64-NEXT: ld a1, %lo(.LCPI92_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv8i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv8i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: lui a0, %hi(.LCPI93_0) +; RV64-NEXT: ld a0, %lo(.LCPI93_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI93_1) +; RV64-NEXT: ld a1, %lo(.LCPI93_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, 
v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI93_2) +; RV64-NEXT: ld a0, %lo(.LCPI93_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI93_3) +; RV64-NEXT: ld a1, %lo(.LCPI93_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv8i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv16i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv16i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: vmv1r.v v1, v0 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: srli a2, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a2 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: li a2, 1 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v24, v16, a2, v0.t +; RV32-NEXT: vnot.v v16, v16, v0.t +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; 
RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 5 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 3 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v24, v16, v24, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v16, v24, v16, v0.t +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v16, a3, v0.t +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB94_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB94_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: vsub.vx v16, v8, a2, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size 
Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv16i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: vmv1r.v v24, v0 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: srli a2, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a2 +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a3, a3, a2 +; RV64-NEXT: li a2, 1 +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v8, v16, a2, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vand.vv v16, v16, v8, v0.t +; RV64-NEXT: lui a3, %hi(.LCPI94_0) +; RV64-NEXT: ld a4, %lo(.LCPI94_0)(a3) +; RV64-NEXT: lui a3, %hi(.LCPI94_1) +; RV64-NEXT: ld a3, %lo(.LCPI94_1)(a3) +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vsub.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v16, v16, v8, v0.t +; RV64-NEXT: lui a5, %hi(.LCPI94_2) +; RV64-NEXT: ld a5, %lo(.LCPI94_2)(a5) +; RV64-NEXT: lui a6, %hi(.LCPI94_3) +; RV64-NEXT: ld a6, %lo(.LCPI94_3)(a6) +; RV64-NEXT: vsrl.vi v8, v16, 4, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: li a7, 56 +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi t0, sp, 16 +; RV64-NEXT: vs8r.v v8, (t0) # Unknown-size Folded Spill +; RV64-NEXT: bltu a0, a1, .LBB94_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB94_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsub.vx v16, v8, a2, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a4, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a5, v0.t +; RV64-NEXT: vmul.vx v8, v8, a6, v0.t +; RV64-NEXT: vsrl.vx v8, v8, a7, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, 
a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call @llvm.vp.cttz.nxv16i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_cttz_zero_undef_nxv16i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_nxv16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 5 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a3, a3, a2 +; RV32-NEXT: li a2, 1 +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v24, v16, a2 +; RV32-NEXT: vnot.v v16, v16 +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsrl.vi v24, v16, 1 +; RV32-NEXT: addi a4, sp, 8 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v0, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 24 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v0, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vsub.vv v16, v16, v24 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v0, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v16, v0 +; RV32-NEXT: vsrl.vi v16, v16, 2 +; RV32-NEXT: vand.vv v16, v16, v0 +; RV32-NEXT: vadd.vv v16, v24, v16 +; RV32-NEXT: vsrl.vi v24, v16, 4 +; RV32-NEXT: vadd.vv v16, v16, v24 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: slli a5, a5, 4 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v16, v24 +; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a4), zero +; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v24, v24, v16 +; RV32-NEXT: li a3, 56 +; RV32-NEXT: vsrl.vx v16, v24, a3 +; RV32-NEXT: addi a4, sp, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB95_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB95_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v24, v8, a2 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v24 +; RV32-NEXT: vsrl.vi v24, v8, 1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v24, v16 +; 
RV32-NEXT: vsub.vv v8, v8, v24 +; RV32-NEXT: vand.vv v24, v8, v0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v0 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v24, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v24 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: vsrl.vx v8, v8, a3 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_nxv16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a3, a3, a2 +; RV64-NEXT: li a2, 1 +; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v24, v16, a2 +; RV64-NEXT: vnot.v v16, v16 +; RV64-NEXT: vand.vv v16, v16, v24 +; RV64-NEXT: lui a3, %hi(.LCPI95_0) +; RV64-NEXT: ld a3, %lo(.LCPI95_0)(a3) +; RV64-NEXT: lui a4, %hi(.LCPI95_1) +; RV64-NEXT: ld a4, %lo(.LCPI95_1)(a4) +; RV64-NEXT: vsrl.vi v24, v16, 1 +; RV64-NEXT: vand.vx v24, v24, a3 +; RV64-NEXT: vsub.vv v16, v16, v24 +; RV64-NEXT: vand.vx v24, v16, a4 +; RV64-NEXT: vsrl.vi v16, v16, 2 +; RV64-NEXT: vand.vx v16, v16, a4 +; RV64-NEXT: vadd.vv v16, v24, v16 +; RV64-NEXT: lui a5, %hi(.LCPI95_2) +; RV64-NEXT: ld a5, %lo(.LCPI95_2)(a5) +; RV64-NEXT: lui a6, %hi(.LCPI95_3) +; RV64-NEXT: ld a6, %lo(.LCPI95_3)(a6) +; RV64-NEXT: vsrl.vi v24, v16, 4 +; RV64-NEXT: vadd.vv v16, v16, v24 +; RV64-NEXT: vand.vx v16, v16, a5 +; RV64-NEXT: vmul.vx v16, v16, a6 +; RV64-NEXT: li a7, 56 +; RV64-NEXT: vsrl.vx v16, v16, a7 +; RV64-NEXT: bltu a0, a1, .LBB95_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB95_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v24, v8, a2 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 1 +; RV64-NEXT: vand.vx v24, v24, a3 +; RV64-NEXT: vsub.vv v8, v8, v24 +; RV64-NEXT: vand.vx v24, v8, a4 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a4 +; RV64-NEXT: vadd.vv v8, v24, v8 +; RV64-NEXT: vsrl.vi v24, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v24 +; RV64-NEXT: vand.vx v8, v8, a5 +; RV64-NEXT: vmul.vx v8, v8, a6 +; RV64-NEXT: vsrl.vx v8, v8, a7 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.cttz.nxv16i64( %va, %m, i32 %evl, i1 true) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll new file mode 100644 index 0000000000000..969109bc5727e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll @@ -0,0 +1,6170 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d 
-riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8>, <2 x i1>, i32, i1 immarg) + +define <2 x i8> @vp_ctlz_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i8> %v +} + +define <2 x i8> @vp_ctlz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i8> %v +} + +declare <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8>, <4 x i1>, i32, i1 immarg) + +define <4 x i8> @vp_ctlz_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i8> %v +} + +define <4 x 
i8> @vp_ctlz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i8> %v +} + +declare <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8>, <8 x i1>, i32, i1 immarg) + +define <8 x i8> @vp_ctlz_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i8> %v +} + +define <8 x i8> @vp_ctlz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i8> %v 
+} + +declare <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8>, <16 x i1>, i32, i1 immarg) + +define <16 x i8> @vp_ctlz_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i8> %v +} + +define <16 x i8> @vp_ctlz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_v16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i8> %v +} + +declare <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16>, <2 x i1>, i32, i1 immarg) + +define <2 x i16> @vp_ctlz_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: 
vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i16> %v +} + +define <2 x i16> @vp_ctlz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, 
v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i16> %v +} + +declare <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16>, <4 x i1>, i32, i1 immarg) + +define <4 x i16> @vp_ctlz_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i16> %v +} + +define <4 x i16> @vp_ctlz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, 
v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i16> %v +} + +declare <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16>, <8 x i1>, i32, i1 immarg) + +define <8 x i16> @vp_ctlz_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: 
vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i16> %v +} + +define <8 x i16> @vp_ctlz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = 
insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i16> %v +} + +declare <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16>, <16 x i1>, i32, i1 immarg) + +define <16 x i16> @vp_ctlz_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i16> %v +} + +define <16 x i16> @vp_ctlz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, 
v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i16> %v +} + +declare <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32>, <2 x i1>, i32, i1 immarg) + +define <2 x i32> @vp_ctlz_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, 
v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i32> %v +} + +define <2 x i32> @vp_ctlz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, 
v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i32> %v +} + +declare <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32>, <4 x i1>, i32, i1 immarg) + +define <4 x i32> @vp_ctlz_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i32> %v +} + +define <4 x i32> @vp_ctlz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; 
RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i32> %v +} + +declare <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32>, <8 x i1>, i32, i1 immarg) + +define <8 x i32> @vp_ctlz_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: 
vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i32> %v +} + +define <8 x i32> @vp_ctlz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli 
zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i32> %v +} + +declare <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32>, <16 x i1>, i32, i1 immarg) + +define <16 x i32> @vp_ctlz_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, 
v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i32> %v +} + +define <16 x i32> @vp_ctlz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 
x i32> zeroinitializer + %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i32> %v +} + +declare <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64>, <2 x i1>, i32, i1 immarg) + +define <2 x i64> @vp_ctlz_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v9, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI24_0) +; RV64-NEXT: ld a0, %lo(.LCPI24_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI24_1) +; RV64-NEXT: ld a1, %lo(.LCPI24_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI24_2) +; RV64-NEXT: ld a0, %lo(.LCPI24_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI24_3) +; RV64-NEXT: ld a1, %lo(.LCPI24_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, 
v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i64> %v +} + +define <2 x i64> @vp_ctlz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v9, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI25_0) +; RV64-NEXT: ld a0, %lo(.LCPI25_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI25_1) +; RV64-NEXT: ld a1, %lo(.LCPI25_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t 
+; RV64-NEXT: lui a0, %hi(.LCPI25_2) +; RV64-NEXT: ld a0, %lo(.LCPI25_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI25_3) +; RV64-NEXT: ld a1, %lo(.LCPI25_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i64> %v +} + +declare <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64>, <4 x i1>, i32, i1 immarg) + +define <4 x i64> @vp_ctlz_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI26_0) +; RV64-NEXT: ld a0, %lo(.LCPI26_0)(a0) +; RV64-NEXT: lui a1, 
%hi(.LCPI26_1) +; RV64-NEXT: ld a1, %lo(.LCPI26_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI26_2) +; RV64-NEXT: ld a0, %lo(.LCPI26_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI26_3) +; RV64-NEXT: ld a1, %lo(.LCPI26_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i64> %v +} + +define <4 x i64> @vp_ctlz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; 
RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI27_0) +; RV64-NEXT: ld a0, %lo(.LCPI27_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI27_1) +; RV64-NEXT: ld a1, %lo(.LCPI27_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI27_2) +; RV64-NEXT: ld a0, %lo(.LCPI27_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI27_3) +; RV64-NEXT: ld a1, %lo(.LCPI27_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i64> %v +} + +declare <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64>, <8 x i1>, i32, i1 immarg) + +define <8 x i64> @vp_ctlz_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi 
v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI28_0) +; RV64-NEXT: ld a0, %lo(.LCPI28_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI28_1) +; RV64-NEXT: ld a1, %lo(.LCPI28_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI28_2) +; RV64-NEXT: ld a0, %lo(.LCPI28_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI28_3) +; RV64-NEXT: ld a1, %lo(.LCPI28_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i64> %v +} + +define <8 x i64> @vp_ctlz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; 
RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI29_0) +; RV64-NEXT: ld a0, %lo(.LCPI29_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI29_1) +; RV64-NEXT: ld a1, %lo(.LCPI29_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI29_2) +; RV64-NEXT: ld a0, %lo(.LCPI29_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI29_3) +; RV64-NEXT: ld a1, %lo(.LCPI29_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i64> %v +} + +declare <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64>, <15 x i1>, i32, i1 immarg) + +define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v15i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; 
RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v15i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI30_0) +; RV64-NEXT: ld a0, %lo(.LCPI30_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI30_1) +; RV64-NEXT: ld a1, %lo(.LCPI30_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI30_2) +; RV64-NEXT: ld a0, %lo(.LCPI30_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI30_3) +; RV64-NEXT: ld a1, %lo(.LCPI30_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 false) + ret <15 x i64> %v +} + +define <15 x i64> @vp_ctlz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v15i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 
2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v15i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI31_0) +; RV64-NEXT: ld a0, %lo(.LCPI31_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI31_1) +; RV64-NEXT: ld a1, %lo(.LCPI31_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI31_2) +; RV64-NEXT: ld a0, %lo(.LCPI31_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI31_3) +; RV64-NEXT: ld a1, %lo(.LCPI31_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <15 x i1> poison, i1 false, i32 0 + %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer + %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 false) + ret <15 x i64> %v +} + +declare <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64>, <16 x i1>, i32, i1 immarg) + +define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v16i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, 
m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v16i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI32_0) +; RV64-NEXT: ld a0, %lo(.LCPI32_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI32_1) +; RV64-NEXT: ld a1, %lo(.LCPI32_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI32_2) +; RV64-NEXT: ld a0, %lo(.LCPI32_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI32_3) +; RV64-NEXT: ld a1, %lo(.LCPI32_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i64> %v +} + +define <16 x i64> @vp_ctlz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, 
a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI33_0) +; RV64-NEXT: ld a0, %lo(.LCPI33_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI33_1) +; RV64-NEXT: ld a1, %lo(.LCPI33_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI33_2) +; RV64-NEXT: ld a0, %lo(.LCPI33_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI33_3) +; RV64-NEXT: ld a1, %lo(.LCPI33_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i64> %v +} + +declare <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64>, <32 x i1>, i32, i1 immarg) + +define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v32i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 56 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 
0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 5 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32-NEXT: li a1, 16 +; RV32-NEXT: vslidedown.vi v24, v0, 2 +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: bltu a0, a1, .LBB34_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB34_2: +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 24 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: lui a3, 349525 +; RV32-NEXT: addi a3, a3, 1365 +; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 48 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: lui a3, 209715 +; RV32-NEXT: addi a3, a3, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 48 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a3, vlenb 
+; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v16, v8, v16, v0.t +; RV32-NEXT: lui a3, 61681 +; RV32-NEXT: addi a3, a3, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: lui a3, 4112 +; RV32-NEXT: addi a3, a3, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a3 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v24 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV32-NEXT: vor.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload 
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 48 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 48 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 56 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v32i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vslidedown.vi v24, v0, 2 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB34_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB34_2: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a1, 32 +; RV64-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a2, %hi(.LCPI34_0) +; RV64-NEXT: ld a3, %lo(.LCPI34_0)(a2) +; RV64-NEXT: lui a2, %hi(.LCPI34_1) +; RV64-NEXT: ld a2, %lo(.LCPI34_1)(a2) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; 
RV64-NEXT: vand.vx v16, v16, a3, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a2, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a2, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a4, %hi(.LCPI34_2) +; RV64-NEXT: ld a4, %lo(.LCPI34_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI34_3) +; RV64-NEXT: ld a5, %lo(.LCPI34_3)(a5) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t +; RV64-NEXT: addi a7, sp, 16 +; RV64-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v16, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v8, v16, 2, v0.t +; RV64-NEXT: vor.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a3, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a2, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a2, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a6, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 false) + ret <32 x i64> %v +} + +define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_v32i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 56 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 48 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: li a1, 16 +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: bltu a0, a1, .LBB35_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB35_2: +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, 
v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a3, 349525 +; RV32-NEXT: addi a3, a3, 1365 +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 5 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v16, v8, v16, v0.t +; RV32-NEXT: lui a3, 209715 +; RV32-NEXT: addi a3, a3, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 24 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v16, v24, v0.t +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v16, v8, v16, v0.t +; RV32-NEXT: lui a3, 61681 +; RV32-NEXT: addi a3, a3, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: lui a3, 4112 +; RV32-NEXT: addi a3, a3, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a3 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 48 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV32-NEXT: vor.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr 
a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v24, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 56 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_v32i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB35_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB35_2: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: li a1, 32 +; RV64-NEXT: vsrl.vx v24, v8, a1, v0.t +; RV64-NEXT: vor.vv v8, v8, v24, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a2, %hi(.LCPI35_0) +; RV64-NEXT: ld a3, %lo(.LCPI35_0)(a2) +; RV64-NEXT: lui a2, %hi(.LCPI35_1) +; RV64-NEXT: ld a2, %lo(.LCPI35_1)(a2) +; RV64-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV64-NEXT: vand.vx v24, v24, a3, v0.t +; RV64-NEXT: vsub.vv v8, v8, v24, v0.t +; RV64-NEXT: vand.vx v24, v8, a2, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a2, v0.t +; RV64-NEXT: vadd.vv v8, v24, v8, v0.t +; RV64-NEXT: lui a4, %hi(.LCPI35_2) +; RV64-NEXT: ld a4, %lo(.LCPI35_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI35_3) +; RV64-NEXT: ld a5, %lo(.LCPI35_3)(a5) +; RV64-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v24, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v16, 1, 
v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 2, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 8, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 16, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vx v24, v16, a1, v0.t +; RV64-NEXT: vor.vv v16, v16, v24, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV64-NEXT: vand.vx v24, v24, a3, v0.t +; RV64-NEXT: vsub.vv v16, v16, v24, v0.t +; RV64-NEXT: vand.vx v24, v16, a2, v0.t +; RV64-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV64-NEXT: vand.vx v16, v16, a2, v0.t +; RV64-NEXT: vadd.vv v16, v24, v16, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV64-NEXT: vadd.vv v16, v16, v24, v0.t +; RV64-NEXT: vand.vx v16, v16, a4, v0.t +; RV64-NEXT: vmul.vx v16, v16, a5, v0.t +; RV64-NEXT: vsrl.vx v16, v16, a6, v0.t +; RV64-NEXT: ret + %head = insertelement <32 x i1> poison, i1 false, i32 0 + %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer + %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 false) + ret <32 x i64> %v +} + +define <2 x i8> @vp_ctlz_zero_undef_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i8> %v +} + +define <2 x i8> @vp_ctlz_zero_undef_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i8> @llvm.vp.ctlz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i8> %v +} + +define <4 x i8> @vp_ctlz_zero_undef_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i8> %v +} + +define <4 x i8> @vp_ctlz_zero_undef_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i8> @llvm.vp.ctlz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i8> %v +} + +define <8 x i8> @vp_ctlz_zero_undef_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i8> %v +} + +define <8 x i8> @vp_ctlz_zero_undef_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; 
CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i8> @llvm.vp.ctlz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i8> %v +} + +define <16 x i8> @vp_ctlz_zero_undef_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vor.vv v8, v8, v9, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i8> %v +} + +define <16 x i8> @vp_ctlz_zero_undef_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctlz_zero_undef_v16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 2 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vor.vv v8, v8, v9 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i8> @llvm.vp.ctlz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i8> %v +} + +define <2 x i16> @vp_ctlz_zero_undef_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; 
RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i16> %v +} + +define <2 x i16> @vp_ctlz_zero_undef_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, 
v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i16> @llvm.vp.ctlz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i16> %v +} + +define <4 x i16> @vp_ctlz_zero_undef_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i16> %v +} + +define <4 x i16> @vp_ctlz_zero_undef_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; 
RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i16> @llvm.vp.ctlz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i16> %v +} + +define <8 x i16> @vp_ctlz_zero_undef_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; 
RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i16> %v +} + +define <8 x i16> @vp_ctlz_zero_undef_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i16> @llvm.vp.ctlz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i16> %v +} + +define <16 x i16> @vp_ctlz_zero_undef_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; 
RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i16> %v +} + +define <16 x i16> @vp_ctlz_zero_undef_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: 
vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i16> @llvm.vp.ctlz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i16> %v +} + +define <2 x i32> @vp_ctlz_zero_undef_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i32> %v +} + +define <2 x i32> @vp_ctlz_zero_undef_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; 
RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i32> @llvm.vp.ctlz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i32> %v +} + +define <4 x i32> @vp_ctlz_zero_undef_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; 
RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i32> %v +} + +define <4 x i32> @vp_ctlz_zero_undef_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: 
vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i32> @llvm.vp.ctlz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i32> %v +} + +define <8 x i32> @vp_ctlz_zero_undef_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i32> %v +} + +define <8 x i32> @vp_ctlz_zero_undef_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; 
RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i32> @llvm.vp.ctlz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i32> %v +} + +define <16 x i32> @vp_ctlz_zero_undef_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; 
RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i32> %v +} + +define <16 x i32> @vp_ctlz_zero_undef_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: 
vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i32> @llvm.vp.ctlz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i32> %v +} + +define <2 x i64> @vp_ctlz_zero_undef_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v9, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v9, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI60_0) +; RV64-NEXT: ld a0, %lo(.LCPI60_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI60_1) +; RV64-NEXT: ld a1, %lo(.LCPI60_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, 
v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI60_2) +; RV64-NEXT: ld a0, %lo(.LCPI60_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI60_3) +; RV64-NEXT: ld a1, %lo(.LCPI60_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i64> %v +} + +define <2 x i64> @vp_ctlz_zero_undef_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v9, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v9 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v9, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v9, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v9 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI61_0) +; RV64-NEXT: ld a0, %lo(.LCPI61_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI61_1) +; RV64-NEXT: ld a1, %lo(.LCPI61_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: vand.vx v9, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: 
vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: lui a0, %hi(.LCPI61_2) +; RV64-NEXT: ld a0, %lo(.LCPI61_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI61_3) +; RV64-NEXT: ld a1, %lo(.LCPI61_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i64> @llvm.vp.ctlz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i64> %v +} + +define <4 x i64> @vp_ctlz_zero_undef_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v10, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI62_0) +; RV64-NEXT: ld a0, %lo(.LCPI62_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI62_1) +; RV64-NEXT: ld 
a1, %lo(.LCPI62_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI62_2) +; RV64-NEXT: ld a0, %lo(.LCPI62_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI62_3) +; RV64-NEXT: ld a1, %lo(.LCPI62_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i64> %v +} + +define <4 x i64> @vp_ctlz_zero_undef_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v10, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v10 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v10, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v10 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI63_0) +; RV64-NEXT: ld a0, %lo(.LCPI63_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI63_1) +; RV64-NEXT: ld a1, %lo(.LCPI63_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: 
vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: vand.vx v10, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: lui a0, %hi(.LCPI63_2) +; RV64-NEXT: ld a0, %lo(.LCPI63_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI63_3) +; RV64-NEXT: ld a1, %lo(.LCPI63_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i64> @llvm.vp.ctlz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i64> %v +} + +define <8 x i64> @vp_ctlz_zero_undef_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v12, v0.t +; RV64-NEXT: vnot.v v8, 
v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI64_0) +; RV64-NEXT: ld a0, %lo(.LCPI64_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI64_1) +; RV64-NEXT: ld a1, %lo(.LCPI64_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI64_2) +; RV64-NEXT: ld a0, %lo(.LCPI64_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI64_3) +; RV64-NEXT: ld a1, %lo(.LCPI64_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i64> %v +} + +define <8 x i64> @vp_ctlz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v12, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v12 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v12, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v12 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI65_0) +; RV64-NEXT: ld a0, 
%lo(.LCPI65_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI65_1) +; RV64-NEXT: ld a1, %lo(.LCPI65_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: vand.vx v12, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: lui a0, %hi(.LCPI65_2) +; RV64-NEXT: ld a0, %lo(.LCPI65_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI65_3) +; RV64-NEXT: ld a1, %lo(.LCPI65_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i64> @llvm.vp.ctlz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i64> %v +} + +define <15 x i64> @vp_ctlz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v15i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v15i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv 
v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI66_0) +; RV64-NEXT: ld a0, %lo(.LCPI66_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI66_1) +; RV64-NEXT: ld a1, %lo(.LCPI66_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI66_2) +; RV64-NEXT: ld a0, %lo(.LCPI66_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI66_3) +; RV64-NEXT: ld a1, %lo(.LCPI66_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 true) + ret <15 x i64> %v +} + +define <15 x i64> @vp_ctlz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v15i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v15i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: li a0, 32 +; 
RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI67_0) +; RV64-NEXT: ld a0, %lo(.LCPI67_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI67_1) +; RV64-NEXT: ld a1, %lo(.LCPI67_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI67_2) +; RV64-NEXT: ld a0, %lo(.LCPI67_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI67_3) +; RV64-NEXT: ld a1, %lo(.LCPI67_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <15 x i1> poison, i1 true, i32 0 + %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer + %v = call <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 true) + ret <15 x i64> %v +} + +define <16 x i64> @vp_ctlz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v16i64: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v16i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv 
v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI68_0) +; RV64-NEXT: ld a0, %lo(.LCPI68_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI68_1) +; RV64-NEXT: ld a1, %lo(.LCPI68_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI68_2) +; RV64-NEXT: ld a0, %lo(.LCPI68_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI68_3) +; RV64-NEXT: ld a1, %lo(.LCPI68_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i64> %v +} + +define <16 x i64> @vp_ctlz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v16 +; 
RV64-NEXT: vsrl.vi v16, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vsrl.vi v16, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: li a0, 32 +; RV64-NEXT: vsrl.vx v16, v8, a0 +; RV64-NEXT: vor.vv v8, v8, v16 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a0, %hi(.LCPI69_0) +; RV64-NEXT: ld a0, %lo(.LCPI69_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI69_1) +; RV64-NEXT: ld a1, %lo(.LCPI69_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI69_2) +; RV64-NEXT: ld a0, %lo(.LCPI69_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI69_3) +; RV64-NEXT: ld a1, %lo(.LCPI69_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i64> %v +} + +define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v32i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 56 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 5 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32-NEXT: li a1, 16 +; RV32-NEXT: vslidedown.vi v24, v0, 2 +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: bltu a0, a1, .LBB70_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB70_2: +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 24 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: lui a3, 349525 +; RV32-NEXT: addi a3, a3, 1365 +; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill 
+; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 48 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: lui a3, 209715 +; RV32-NEXT: addi a3, a3, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 48 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v16, v8, v16, v0.t +; RV32-NEXT: lui a3, 61681 +; RV32-NEXT: addi a3, a3, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 40 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: lui a3, 4112 +; RV32-NEXT: addi a3, a3, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a3 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v24 +; RV32-NEXT: csrr 
a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV32-NEXT: vor.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV32-NEXT: vor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 48 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 48 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 56 +; 
RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v32i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vslidedown.vi v24, v0, 2 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB70_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB70_2: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: li a1, 32 +; RV64-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: lui a2, %hi(.LCPI70_0) +; RV64-NEXT: ld a3, %lo(.LCPI70_0)(a2) +; RV64-NEXT: lui a2, %hi(.LCPI70_1) +; RV64-NEXT: ld a2, %lo(.LCPI70_1)(a2) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a3, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a2, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a2, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a4, %hi(.LCPI70_2) +; RV64-NEXT: ld a4, %lo(.LCPI70_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI70_3) +; RV64-NEXT: ld a5, %lo(.LCPI70_3)(a5) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t +; RV64-NEXT: addi a7, sp, 16 +; RV64-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vor.vv v16, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v8, v16, 2, v0.t +; RV64-NEXT: vor.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 8, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 16, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a1, v0.t +; RV64-NEXT: vor.vv v8, v8, v16, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a3, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a2, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a2, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: 
vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a6, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 true) + ret <32 x i64> %v +} + +define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctlz_zero_undef_v32i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: vmv8r.v v0, v16 +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: bltu a0, a1, .LBB71_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB71_2: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a3, 40 +; RV32-NEXT: mul a1, a1, a3 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v16 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 5 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a3, 349525 +; RV32-NEXT: addi a3, a3, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 24 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a3, 209715 +; RV32-NEXT: addi a3, a3, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a3 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v24, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v24 +; RV32-NEXT: lui a3, 61681 +; RV32-NEXT: addi a3, a3, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a3 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v24 +; RV32-NEXT: lui a3, 4112 +; RV32-NEXT: addi a3, a3, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a3 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v24 +; RV32-NEXT: li 
a2, 56 +; RV32-NEXT: vsrl.vx v8, v8, a2 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v8, v0, 1 +; RV32-NEXT: vor.vv v8, v0, v8 +; RV32-NEXT: vsrl.vi v0, v8, 2 +; RV32-NEXT: vor.vv v8, v8, v0 +; RV32-NEXT: vsrl.vi v0, v8, 4 +; RV32-NEXT: vor.vv v8, v8, v0 +; RV32-NEXT: vsrl.vi v0, v8, 8 +; RV32-NEXT: vor.vv v8, v8, v0 +; RV32-NEXT: vsrl.vi v0, v8, 16 +; RV32-NEXT: vor.vv v8, v8, v0 +; RV32-NEXT: vsrl.vx v0, v8, a1 +; RV32-NEXT: vor.vv v8, v8, v0 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v8, v8, v0 +; RV32-NEXT: vsrl.vi v0, v8, 1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v0, v0, v24 +; RV32-NEXT: vsub.vv v8, v8, v0 +; RV32-NEXT: vand.vv v0, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v0, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: vsrl.vx v16, v8, a2 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctlz_zero_undef_v32i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a2, 16 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB71_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB71_2: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v8, 1 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 2 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 4 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 8 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vsrl.vi v24, v8, 16 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: li a1, 32 +; RV64-NEXT: vsrl.vx v24, v8, a1 +; RV64-NEXT: vor.vv v8, v8, v24 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: lui a2, %hi(.LCPI71_0) +; RV64-NEXT: ld a2, %lo(.LCPI71_0)(a2) +; RV64-NEXT: lui a3, %hi(.LCPI71_1) +; RV64-NEXT: ld a3, %lo(.LCPI71_1)(a3) +; RV64-NEXT: vsrl.vi v24, v8, 1 +; RV64-NEXT: vand.vx v24, v24, a2 +; RV64-NEXT: vsub.vv v8, v8, v24 +; RV64-NEXT: vand.vx v24, v8, a3 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a3 +; RV64-NEXT: vadd.vv v8, v24, v8 +; RV64-NEXT: lui a4, %hi(.LCPI71_2) +; RV64-NEXT: ld a4, %lo(.LCPI71_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI71_3) +; RV64-NEXT: ld a5, %lo(.LCPI71_3)(a5) +; RV64-NEXT: vsrl.vi v24, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v24 +; RV64-NEXT: vand.vx v8, v8, a4 +; 
RV64-NEXT: vmul.vx v8, v8, a5 +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6 +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v16, 1 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 2 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 4 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 8 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 16 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vsrl.vx v24, v16, a1 +; RV64-NEXT: vor.vv v16, v16, v24 +; RV64-NEXT: vnot.v v16, v16 +; RV64-NEXT: vsrl.vi v24, v16, 1 +; RV64-NEXT: vand.vx v24, v24, a2 +; RV64-NEXT: vsub.vv v16, v16, v24 +; RV64-NEXT: vand.vx v24, v16, a3 +; RV64-NEXT: vsrl.vi v16, v16, 2 +; RV64-NEXT: vand.vx v16, v16, a3 +; RV64-NEXT: vadd.vv v16, v24, v16 +; RV64-NEXT: vsrl.vi v24, v16, 4 +; RV64-NEXT: vadd.vv v16, v16, v24 +; RV64-NEXT: vand.vx v16, v16, a4 +; RV64-NEXT: vmul.vx v16, v16, a5 +; RV64-NEXT: vsrl.vx v16, v16, a6 +; RV64-NEXT: ret + %head = insertelement <32 x i1> poison, i1 true, i32 0 + %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer + %v = call <32 x i64> @llvm.vp.ctlz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 true) + ret <32 x i64> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll new file mode 100644 index 0000000000000..f390e9b4610a4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll @@ -0,0 +1,5240 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8>, <2 x i1>, i32, i1 immarg) + +define <2 x i8> @vp_cttz_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i8> %v +} + +define <2 x i8> @vp_cttz_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: 
li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i8> %v +} + +declare <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8>, <4 x i1>, i32, i1 immarg) + +define <4 x i8> @vp_cttz_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i8> %v +} + +define <4 x i8> @vp_cttz_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i8> %v +} + +declare <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8>, <8 x i1>, i32, i1 immarg) + +define <8 x i8> @vp_cttz_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi 
v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i8> %v +} + +define <8 x i8> @vp_cttz_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i8> %v +} + +declare <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8>, <16 x i1>, i32, i1 immarg) + +define <16 x i8> @vp_cttz_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i8> %v +} + +define <16 x i8> @vp_cttz_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_v16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i8> %v +} + +declare <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16>, <2 x i1>, i32, i1 immarg) + +define <2 x i16> @vp_cttz_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v2i16: +; RV32: # 
%bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i16> %v +} + +define <2 x i16> @vp_cttz_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: 
vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i16> %v +} + +declare <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16>, <4 x i1>, i32, i1 immarg) + +define <4 x i16> @vp_cttz_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i16> %v +} + +define <4 x i16> @vp_cttz_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; 
RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i16> %v +} + +declare <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16>, <8 x i1>, i32, i1 immarg) + +define <8 x i16> @vp_cttz_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 
false) + ret <8 x i16> %v +} + +define <8 x i16> @vp_cttz_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i16> %v +} + +declare <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16>, <16 x i1>, i32, i1 immarg) + +define <16 x i16> @vp_cttz_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v16i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v16i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; 
RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i16> %v +} + +define <16 x i16> @vp_cttz_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i16> %v +} + +declare <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32>, <2 x i1>, i32, i1 immarg) + +define <2 x i32> @vp_cttz_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: 
vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i32> %v +} + +define <2 x i32> @vp_cttz_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw 
a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i32> %v +} + +declare <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32>, <4 x i1>, i32, i1 immarg) + +define <4 x i32> @vp_cttz_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i32> %v +} + +define <4 x i32> @vp_cttz_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, 
a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i32> %v +} + +declare <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32>, <8 x i1>, i32, i1 immarg) + +define <8 x i32> @vp_cttz_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; 
RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i32> %v +} + +define <8 x i32> @vp_cttz_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i32> %v +} + +declare <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32>, <16 x i1>, i32, i1 immarg) + +define <16 x i32> @vp_cttz_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v16i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; 
RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v16i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i32> %v +} + +define <16 x i32> @vp_cttz_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, 
v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i32> %v +} + +declare <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64>, <2 x i1>, i32, i1 immarg) + +define <2 x i64> @vp_cttz_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI24_0) +; RV64-NEXT: ld a0, %lo(.LCPI24_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI24_1) +; RV64-NEXT: ld a1, %lo(.LCPI24_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI24_2) +; RV64-NEXT: ld a0, %lo(.LCPI24_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI24_3) +; RV64-NEXT: ld a1, %lo(.LCPI24_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i64> %v +} + +define <2 x i64> @vp_cttz_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV32-NEXT: 
vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI25_0) +; RV64-NEXT: ld a0, %lo(.LCPI25_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI25_1) +; RV64-NEXT: ld a1, %lo(.LCPI25_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI25_2) +; RV64-NEXT: ld a0, %lo(.LCPI25_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI25_3) +; RV64-NEXT: ld a1, %lo(.LCPI25_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 false, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 false) + ret <2 x i64> %v +} + +declare <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64>, <4 x i1>, i32, i1 immarg) + +define <4 x i64> @vp_cttz_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; 
RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI26_0) +; RV64-NEXT: ld a0, %lo(.LCPI26_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI26_1) +; RV64-NEXT: ld a1, %lo(.LCPI26_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI26_2) +; RV64-NEXT: ld a0, %lo(.LCPI26_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI26_3) +; RV64-NEXT: ld a1, %lo(.LCPI26_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i64> %v +} + +define <4 x i64> @vp_cttz_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; 
RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI27_0) +; RV64-NEXT: ld a0, %lo(.LCPI27_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI27_1) +; RV64-NEXT: ld a1, %lo(.LCPI27_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI27_2) +; RV64-NEXT: ld a0, %lo(.LCPI27_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI27_3) +; RV64-NEXT: ld a1, %lo(.LCPI27_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 false, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 false) + ret <4 x i64> %v +} + +declare <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64>, <8 x i1>, i32, i1 immarg) + +define <8 x i64> @vp_cttz_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; 
RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI28_0) +; RV64-NEXT: ld a0, %lo(.LCPI28_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI28_1) +; RV64-NEXT: ld a1, %lo(.LCPI28_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI28_2) +; RV64-NEXT: ld a0, %lo(.LCPI28_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI28_3) +; RV64-NEXT: ld a1, %lo(.LCPI28_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i64> %v +} + +define <8 x i64> @vp_cttz_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 
%hi(.LCPI29_0) +; RV64-NEXT: ld a0, %lo(.LCPI29_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI29_1) +; RV64-NEXT: ld a1, %lo(.LCPI29_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI29_2) +; RV64-NEXT: ld a0, %lo(.LCPI29_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI29_3) +; RV64-NEXT: ld a1, %lo(.LCPI29_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 false, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 false) + ret <8 x i64> %v +} + +declare <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64>, <15 x i1>, i32, i1 immarg) + +define <15 x i64> @vp_cttz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v15i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v15i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI30_0) +; RV64-NEXT: ld a0, %lo(.LCPI30_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI30_1) +; RV64-NEXT: ld a1, %lo(.LCPI30_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: 
lui a0, %hi(.LCPI30_2) +; RV64-NEXT: ld a0, %lo(.LCPI30_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI30_3) +; RV64-NEXT: ld a1, %lo(.LCPI30_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 false) + ret <15 x i64> %v +} + +define <15 x i64> @vp_cttz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v15i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v15i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI31_0) +; RV64-NEXT: ld a0, %lo(.LCPI31_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI31_1) +; RV64-NEXT: ld a1, %lo(.LCPI31_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI31_2) +; RV64-NEXT: ld a0, %lo(.LCPI31_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI31_3) +; RV64-NEXT: ld a1, %lo(.LCPI31_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <15 x i1> poison, i1 false, i32 0 + %m = shufflevector <15 x i1> %head, <15 
x i1> poison, <15 x i32> zeroinitializer + %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 false) + ret <15 x i64> %v +} + +declare <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64>, <16 x i1>, i32, i1 immarg) + +define <16 x i64> @vp_cttz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v16i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v16i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI32_0) +; RV64-NEXT: ld a0, %lo(.LCPI32_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI32_1) +; RV64-NEXT: ld a1, %lo(.LCPI32_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI32_2) +; RV64-NEXT: ld a0, %lo(.LCPI32_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI32_3) +; RV64-NEXT: ld a1, %lo(.LCPI32_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i64> %v +} + +define <16 x i64> @vp_cttz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma 
+; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI33_0) +; RV64-NEXT: ld a0, %lo(.LCPI33_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI33_1) +; RV64-NEXT: ld a1, %lo(.LCPI33_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI33_2) +; RV64-NEXT: ld a0, %lo(.LCPI33_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI33_3) +; RV64-NEXT: ld a1, %lo(.LCPI33_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 false, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 false) + ret <16 x i64> %v +} + +declare <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64>, <32 x i1>, i32, i1 immarg) + +define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v32i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 6 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 64 * vlenb +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: 
vmv8r.v v16, v8 +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32-NEXT: li a1, 16 +; RV32-NEXT: vslidedown.vi v24, v0, 2 +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: bltu a0, a1, .LBB34_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB34_2: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v8, v16, a1, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 56 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: li a3, 32 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v8, -1 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 56 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: lui a4, 349525 +; RV32-NEXT: addi a4, a4, 1365 +; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 56 +; RV32-NEXT: mul a5, a5, a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 56 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: lui a4, 209715 +; RV32-NEXT: addi a4, a4, 819 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 56 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, 
a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v16, v8, v16, v0.t +; RV32-NEXT: lui a4, 61681 +; RV32-NEXT: addi a4, a4, -241 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: lui a4, 4112 +; RV32-NEXT: addi a4, a4, 257 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a4 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v24 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 40 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vx v8, v16, a1, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v16, v16, v8, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: 
li a1, 56 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 56 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 48 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 6 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v32i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vslidedown.vi v24, v0, 2 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB34_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB34_2: +; RV64-NEXT: li a2, 1 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a2, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a1, %hi(.LCPI34_0) +; RV64-NEXT: ld a1, %lo(.LCPI34_0)(a1) +; RV64-NEXT: lui a3, %hi(.LCPI34_1) +; RV64-NEXT: ld a3, %lo(.LCPI34_1)(a3) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a4, %hi(.LCPI34_2) +; RV64-NEXT: ld a4, %lo(.LCPI34_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI34_3) +; RV64-NEXT: ld a5, %lo(.LCPI34_3)(a5) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6, 
v0.t +; RV64-NEXT: addi a7, sp, 16 +; RV64-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsub.vx v16, v8, a2, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a6, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 false) + ret <32 x i64> %v +} + +define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_v32i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 56 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 48 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV32-NEXT: li a1, 16 +; RV32-NEXT: vmclr.m v0 +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: bltu a0, a1, .LBB35_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB35_2: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: li a3, 32 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 40 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a4, 349525 +; RV32-NEXT: addi a4, a4, 1365 +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v16, v8, v16, v0.t +; RV32-NEXT: lui a4, 209715 +; RV32-NEXT: addi a4, a4, 819 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 
+; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v16, v16, v24, v0.t +; RV32-NEXT: lui a4, 61681 +; RV32-NEXT: addi a4, a4, -241 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: lui a4, 4112 +; RV32-NEXT: addi a4, a4, 257 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v16, v16, v8, v0.t +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v8, v16, a2, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 48 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v24, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v24, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 56 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_v32i64_unmasked: +; RV64: # %bb.0: +; 
RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vmclr.m v0 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB35_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB35_2: +; RV64-NEXT: li a2, 1 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v24, v8, a2, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v24, v0.t +; RV64-NEXT: lui a1, %hi(.LCPI35_0) +; RV64-NEXT: ld a1, %lo(.LCPI35_0)(a1) +; RV64-NEXT: lui a3, %hi(.LCPI35_1) +; RV64-NEXT: ld a3, %lo(.LCPI35_1)(a3) +; RV64-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV64-NEXT: vand.vx v24, v24, a1, v0.t +; RV64-NEXT: vsub.vv v8, v8, v24, v0.t +; RV64-NEXT: vand.vx v24, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v24, v8, v0.t +; RV64-NEXT: lui a4, %hi(.LCPI35_2) +; RV64-NEXT: ld a4, %lo(.LCPI35_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI35_3) +; RV64-NEXT: ld a5, %lo(.LCPI35_3)(a5) +; RV64-NEXT: vsrl.vi v24, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v24, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v24, v16, a2, v0.t +; RV64-NEXT: vnot.v v16, v16, v0.t +; RV64-NEXT: vand.vv v16, v16, v24, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV64-NEXT: vand.vx v24, v24, a1, v0.t +; RV64-NEXT: vsub.vv v16, v16, v24, v0.t +; RV64-NEXT: vand.vx v24, v16, a3, v0.t +; RV64-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV64-NEXT: vand.vx v16, v16, a3, v0.t +; RV64-NEXT: vadd.vv v16, v24, v16, v0.t +; RV64-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV64-NEXT: vadd.vv v16, v16, v24, v0.t +; RV64-NEXT: vand.vx v16, v16, a4, v0.t +; RV64-NEXT: vmul.vx v16, v16, a5, v0.t +; RV64-NEXT: vsrl.vx v16, v16, a6, v0.t +; RV64-NEXT: ret + %head = insertelement <32 x i1> poison, i1 false, i32 0 + %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer + %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 false) + ret <32 x i64> %v +} + +define <2 x i8> @vp_cttz_zero_undef_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i8> %v +} + +define <2 x i8> @vp_cttz_zero_undef_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; 
CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i8> @llvm.vp.cttz.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i8> %v +} + +define <4 x i8> @vp_cttz_zero_undef_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i8> %v +} + +define <4 x i8> @vp_cttz_zero_undef_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i8> @llvm.vp.cttz.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i8> %v +} + +define <8 x i8> @vp_cttz_zero_undef_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i8> %v +} + +define <8 x i8> @vp_cttz_zero_undef_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v8i8_unmasked: +; 
CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i8> @llvm.vp.cttz.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i8> %v +} + +define <16 x i8> @vp_cttz_zero_undef_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1, v0.t +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: vand.vv v8, v8, v9, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i8> %v +} + +define <16 x i8> @vp_cttz_zero_undef_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_cttz_zero_undef_v16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 1 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsub.vx v9, v8, a1 +; CHECK-NEXT: vnot.v v8, v8 +; CHECK-NEXT: vand.vv v8, v8, v9 +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i8> @llvm.vp.cttz.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i8> %v +} + +define <2 x i16> @vp_cttz_zero_undef_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v2i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: 
lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v2i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i16> %v +} + +define <2 x i16> @vp_cttz_zero_undef_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i16> @llvm.vp.cttz.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i16> %v +} + +define <4 x i16> @vp_cttz_zero_undef_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v4i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; 
RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v4i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i16> %v +} + +define <4 x i16> @vp_cttz_zero_undef_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, 
i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i16> @llvm.vp.cttz.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i16> %v +} + +define <8 x i16> @vp_cttz_zero_undef_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v8i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v8i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i16> %v +} + +define <8 x i16> @vp_cttz_zero_undef_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv 
v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i16> @llvm.vp.cttz.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i16> %v +} + +define <16 x i16> @vp_cttz_zero_undef_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v16i16: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v16i16: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i16> %v +} + +define <16 x i16> @vp_cttz_zero_undef_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; 
RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i16> @llvm.vp.cttz.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i16> %v +} + +define <2 x i32> @vp_cttz_zero_undef_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v2i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v2i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i32> %v +} + +define <2 x i32> @vp_cttz_zero_undef_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: 
vp_cttz_zero_undef_v2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i32> @llvm.vp.cttz.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i32> %v +} + +define <4 x i32> @vp_cttz_zero_undef_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v4i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v4i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; 
RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i32> %v +} + +define <4 x i32> @vp_cttz_zero_undef_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i32> @llvm.vp.cttz.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i32> %v +} + +define <8 x i32> @vp_cttz_zero_undef_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v8i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; 
RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v8i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i32> %v +} + +define <8 x i32> @vp_cttz_zero_undef_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i32> @llvm.vp.cttz.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i32> %v +} + +define <16 x i32> @vp_cttz_zero_undef_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { +; 
RV32-LABEL: vp_cttz_zero_undef_v16i32: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vnot.v v8, v8, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v16i32: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i32> %v +} + +define <16 x i32> @vp_cttz_zero_undef_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1 +; RV32-NEXT: vnot.v v8, v8 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 
+; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i32> @llvm.vp.cttz.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i32> %v +} + +define <2 x i64> @vp_cttz_zero_undef_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v2i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v2i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI60_0) +; RV64-NEXT: ld a0, %lo(.LCPI60_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI60_1) +; RV64-NEXT: ld a1, %lo(.LCPI60_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI60_2) +; RV64-NEXT: ld a0, %lo(.LCPI60_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI60_3) +; RV64-NEXT: ld a1, %lo(.LCPI60_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i64> %v +} + 
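+; Sketch of the pattern these CHECK lines encode (inferred from the generated
+; constants, not a statement of the exact lowering path): for each enabled lane
+; the cttz is expanded as
+;   cttz(x) = ctpop(~x & (x - 1))
+; with the popcount done by the usual SWAR sequence, e.g. for i32 elements:
+;   v = v - ((v >> 1) & 0x55555555)
+;   v = (v & 0x33333333) + ((v >> 2) & 0x33333333)
+;   v = (v + (v >> 4)) & 0x0F0F0F0F
+;   ctpop = (v * 0x01010101) >> 24
+; which is where the lui/addi(w) constants (349525/1365, 209715/819,
+; 61681/-241, 4112/257) and the trailing vmul/vsrl come from; the i8/i16
+; tests use the narrower masks and the i64 tests the 64-bit masks with a
+; final shift of 56.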
+define <2 x i64> @vp_cttz_zero_undef_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsub.vx v9, v8, a1 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.i v10, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v10 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; RV32-NEXT: vmv.v.x v9, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: vsub.vx v9, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v9 +; RV64-NEXT: lui a0, %hi(.LCPI61_0) +; RV64-NEXT: ld a0, %lo(.LCPI61_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI61_1) +; RV64-NEXT: ld a1, %lo(.LCPI61_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: vand.vx v9, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: lui a0, %hi(.LCPI61_2) +; RV64-NEXT: ld a0, %lo(.LCPI61_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI61_3) +; RV64-NEXT: ld a1, %lo(.LCPI61_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i64> @llvm.vp.cttz.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i64> %v +} + +define <4 x i64> @vp_cttz_zero_undef_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v4i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma 
+; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v4i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI62_0) +; RV64-NEXT: ld a0, %lo(.LCPI62_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI62_1) +; RV64-NEXT: ld a1, %lo(.LCPI62_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI62_2) +; RV64-NEXT: ld a0, %lo(.LCPI62_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI62_3) +; RV64-NEXT: ld a1, %lo(.LCPI62_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i64> %v +} + +define <4 x i64> @vp_cttz_zero_undef_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsub.vx v10, v8, a1 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.i v12, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v12 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: 
vand.vv v8, v8, v10 +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; RV32-NEXT: vmv.v.x v10, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: vsub.vx v10, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v10 +; RV64-NEXT: lui a0, %hi(.LCPI63_0) +; RV64-NEXT: ld a0, %lo(.LCPI63_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI63_1) +; RV64-NEXT: ld a1, %lo(.LCPI63_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: vand.vx v10, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: lui a0, %hi(.LCPI63_2) +; RV64-NEXT: ld a0, %lo(.LCPI63_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI63_3) +; RV64-NEXT: ld a1, %lo(.LCPI63_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i64> @llvm.vp.cttz.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i64> %v +} + +define <8 x i64> @vp_cttz_zero_undef_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1, v0.t +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v12, v0.t +; RV64-NEXT: lui 
a0, %hi(.LCPI64_0) +; RV64-NEXT: ld a0, %lo(.LCPI64_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI64_1) +; RV64-NEXT: ld a1, %lo(.LCPI64_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI64_2) +; RV64-NEXT: ld a0, %lo(.LCPI64_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI64_3) +; RV64-NEXT: ld a1, %lo(.LCPI64_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i64> %v +} + +define <8 x i64> @vp_cttz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsub.vx v12, v8, a1 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.i v16, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v16 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; RV32-NEXT: vmv.v.x v12, a1 +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: vsub.vx v12, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v12 +; RV64-NEXT: lui a0, %hi(.LCPI65_0) +; RV64-NEXT: ld a0, %lo(.LCPI65_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI65_1) +; RV64-NEXT: ld a1, %lo(.LCPI65_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: vand.vx v12, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: lui a0, %hi(.LCPI65_2) +; RV64-NEXT: ld a0, %lo(.LCPI65_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI65_3) +; RV64-NEXT: ld a1, %lo(.LCPI65_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: 
ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i64> @llvm.vp.cttz.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i64> %v +} + +define <15 x i64> @vp_cttz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v15i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v15i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI66_0) +; RV64-NEXT: ld a0, %lo(.LCPI66_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI66_1) +; RV64-NEXT: ld a1, %lo(.LCPI66_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI66_2) +; RV64-NEXT: ld a0, %lo(.LCPI66_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI66_3) +; RV64-NEXT: ld a1, %lo(.LCPI66_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 true) + ret <15 x i64> %v +} + +define <15 x i64> @vp_cttz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v15i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: 
vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v15i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: lui a0, %hi(.LCPI67_0) +; RV64-NEXT: ld a0, %lo(.LCPI67_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI67_1) +; RV64-NEXT: ld a1, %lo(.LCPI67_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI67_2) +; RV64-NEXT: ld a0, %lo(.LCPI67_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI67_3) +; RV64-NEXT: ld a1, %lo(.LCPI67_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <15 x i1> poison, i1 true, i32 0 + %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer + %v = call <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 true) + ret <15 x i64> %v +} + +define <16 x i64> @vp_cttz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v16i64: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1, v0.t +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, 
ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v16i64: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI68_0) +; RV64-NEXT: ld a0, %lo(.LCPI68_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI68_1) +; RV64-NEXT: ld a1, %lo(.LCPI68_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI68_2) +; RV64-NEXT: ld a0, %lo(.LCPI68_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI68_3) +; RV64-NEXT: ld a1, %lo(.LCPI68_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i64> %v +} + +define <16 x i64> @vp_cttz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a1 +; RV32-NEXT: li a1, 32 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a2 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv 
v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a1, 1 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a1 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v16 +; RV64-NEXT: lui a0, %hi(.LCPI69_0) +; RV64-NEXT: ld a0, %lo(.LCPI69_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI69_1) +; RV64-NEXT: ld a1, %lo(.LCPI69_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI69_2) +; RV64-NEXT: ld a0, %lo(.LCPI69_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI69_3) +; RV64-NEXT: ld a1, %lo(.LCPI69_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i64> %v +} + +define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v32i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 6 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xc0, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 64 * vlenb +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: vmv8r.v v16, v8 +; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV32-NEXT: li a1, 16 +; RV32-NEXT: vslidedown.vi v24, v0, 2 +; RV32-NEXT: mv a2, a0 +; RV32-NEXT: bltu a0, a1, .LBB70_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: .LBB70_2: +; RV32-NEXT: li a1, 1 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v8, v16, a1, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: li a4, 56 +; RV32-NEXT: mul a3, a3, a4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: li a3, 32 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v8, -1 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 56 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: lui a4, 349525 +; RV32-NEXT: addi a4, a4, 1365 +; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t +; RV32-NEXT: csrr a5, vlenb +; RV32-NEXT: li a6, 56 +; RV32-NEXT: mul a5, a5, 
a6 +; RV32-NEXT: add a5, sp, a5 +; RV32-NEXT: addi a5, a5, 16 +; RV32-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 56 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: lui a4, 209715 +; RV32-NEXT: addi a4, a4, 819 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 56 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t +; RV32-NEXT: vand.vv v16, v16, v8, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v16, v8, v16, v0.t +; RV32-NEXT: lui a4, 61681 +; RV32-NEXT: addi a4, a4, -241 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v8, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 48 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: lui a4, 4112 +; RV32-NEXT: addi a4, a4, 257 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a4 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 4 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size 
Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v24 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a3, 40 +; RV32-NEXT: mul a0, a0, a3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vx v8, v16, a1, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v16, v16, v8, v0.t +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v16, v8, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 56 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 56 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 48 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 
+; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 6 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v32i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; RV64-NEXT: li a2, 16 +; RV64-NEXT: vslidedown.vi v24, v0, 2 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB70_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB70_2: +; RV64-NEXT: li a2, 1 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v16, v8, a2, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a1, %hi(.LCPI70_0) +; RV64-NEXT: ld a1, %lo(.LCPI70_0)(a1) +; RV64-NEXT: lui a3, %hi(.LCPI70_1) +; RV64-NEXT: ld a3, %lo(.LCPI70_1)(a3) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a4, %hi(.LCPI70_2) +; RV64-NEXT: ld a4, %lo(.LCPI70_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI70_3) +; RV64-NEXT: ld a5, %lo(.LCPI70_3)(a5) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t +; RV64-NEXT: addi a7, sp, 16 +; RV64-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsub.vx v16, v8, a2, v0.t +; RV64-NEXT: vnot.v v8, v8, v0.t +; RV64-NEXT: vand.vv v8, v8, v16, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a1, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a3, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: vsrl.vx v16, v8, a6, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 true) + ret <32 x i64> %v +} + +define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { +; RV32-LABEL: vp_cttz_zero_undef_v32i64_unmasked: +; 
RV32: # %bb.0: +; RV32-NEXT: li a2, 16 +; RV32-NEXT: vmv8r.v v0, v16 +; RV32-NEXT: mv a1, a0 +; RV32-NEXT: bltu a0, a2, .LBB71_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: li a1, 16 +; RV32-NEXT: .LBB71_2: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: li a3, 40 +; RV32-NEXT: mul a2, a2, a3 +; RV32-NEXT: sub sp, sp, a2 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb +; RV32-NEXT: li a2, 1 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vsub.vx v16, v8, a2 +; RV32-NEXT: li a3, 32 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.i v24, -1 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vxor.vv v8, v8, v24 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a4, 349525 +; RV32-NEXT: addi a4, a4, 1365 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a4, 209715 +; RV32-NEXT: addi a4, a4, 819 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v16, a4 +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v24, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v24 +; RV32-NEXT: lui a4, 61681 +; RV32-NEXT: addi a4, a4, -241 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a4 +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v24 +; RV32-NEXT: lui a4, 4112 +; RV32-NEXT: addi a4, a4, 257 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma +; RV32-NEXT: vmv.v.x v24, a4 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v24 +; RV32-NEXT: li a1, 56 +; RV32-NEXT: vsrl.vx v8, v8, a1 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: addi a3, a0, -16 +; RV32-NEXT: sltu a0, a0, a3 +; RV32-NEXT: addi a0, a0, -1 +; RV32-NEXT: and a0, a0, a3 +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vxor.vv v8, v0, v8 +; RV32-NEXT: vsub.vx v0, v0, a2 +; RV32-NEXT: vand.vv v8, v8, v0 +; RV32-NEXT: vsrl.vi v0, v8, 1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a2, 24 +; RV32-NEXT: mul a0, a0, a2 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v0, v0, v24 +; RV32-NEXT: 
vsub.vv v8, v8, v0 +; RV32-NEXT: vand.vv v0, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v0, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: vsrl.vx v16, v8, a1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_cttz_zero_undef_v32i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: li a2, 16 +; RV64-NEXT: mv a1, a0 +; RV64-NEXT: bltu a0, a2, .LBB71_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: li a1, 16 +; RV64-NEXT: .LBB71_2: +; RV64-NEXT: li a2, 1 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v24, v8, a2 +; RV64-NEXT: vnot.v v8, v8 +; RV64-NEXT: vand.vv v8, v8, v24 +; RV64-NEXT: lui a1, %hi(.LCPI71_0) +; RV64-NEXT: ld a1, %lo(.LCPI71_0)(a1) +; RV64-NEXT: lui a3, %hi(.LCPI71_1) +; RV64-NEXT: ld a3, %lo(.LCPI71_1)(a3) +; RV64-NEXT: vsrl.vi v24, v8, 1 +; RV64-NEXT: vand.vx v24, v24, a1 +; RV64-NEXT: vsub.vv v8, v8, v24 +; RV64-NEXT: vand.vx v24, v8, a3 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a3 +; RV64-NEXT: vadd.vv v8, v24, v8 +; RV64-NEXT: lui a4, %hi(.LCPI71_2) +; RV64-NEXT: ld a4, %lo(.LCPI71_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI71_3) +; RV64-NEXT: ld a5, %lo(.LCPI71_3)(a5) +; RV64-NEXT: vsrl.vi v24, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v24 +; RV64-NEXT: vand.vx v8, v8, a4 +; RV64-NEXT: vmul.vx v8, v8, a5 +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6 +; RV64-NEXT: addi a7, a0, -16 +; RV64-NEXT: sltu a0, a0, a7 +; RV64-NEXT: addi a0, a0, -1 +; RV64-NEXT: and a0, a0, a7 +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsub.vx v24, v16, a2 +; RV64-NEXT: vnot.v v16, v16 +; RV64-NEXT: vand.vv v16, v16, v24 +; RV64-NEXT: vsrl.vi v24, v16, 1 +; RV64-NEXT: vand.vx v24, v24, a1 +; RV64-NEXT: vsub.vv v16, v16, v24 +; RV64-NEXT: vand.vx v24, v16, a3 +; RV64-NEXT: vsrl.vi v16, v16, 2 +; RV64-NEXT: vand.vx v16, v16, a3 +; RV64-NEXT: vadd.vv v16, v24, v16 +; RV64-NEXT: vsrl.vi v24, v16, 4 +; RV64-NEXT: vadd.vv v16, v16, v24 +; RV64-NEXT: vand.vx v16, v16, a4 +; RV64-NEXT: vmul.vx v16, v16, a5 +; RV64-NEXT: vsrl.vx v16, v16, a6 +; RV64-NEXT: ret + %head = insertelement <32 x i1> poison, i1 true, i32 0 + %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer + %v = call <32 x i64> @llvm.vp.cttz.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 true) + ret <32 x i64> %v +} diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp index 5abead98aa96e..f63e9861b210d 100644 --- a/llvm/unittests/IR/VPIntrinsicTest.cpp +++ b/llvm/unittests/IR/VPIntrinsicTest.cpp @@ -152,6 +152,10 @@ class VPIntrinsicTest : public testing::Test { << "(<8 x i16>, <8 x i1>, i32) "; Str << " declare <8 x i16> @llvm.vp.ctpop.v8i16" << "(<8 x i16>, <8 x i1>, i32) "; + Str << " declare <8 x i16> @llvm.vp.ctlz.v8i16" + << "(<8 x i16>, <8 x i1>, i32, i1 immarg) "; + Str << " declare <8 x i16> 
@llvm.vp.cttz.v8i16" + << "(<8 x i16>, <8 x i1>, i32, i1 immarg) "; Str << " declare <8 x i16> @llvm.vp.fshl.v8i16" << "(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) "; Str << " declare <8 x i16> @llvm.vp.fshr.v8i16"