diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 878f7b3194830..4d0debd399e5f 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -126,6 +126,7 @@ class RISCVVIntrinsic {
   Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
   bits<4> ScalarOperand = NoScalarOperand;
   bits<5> VLOperand = NoVLOperand;
+  bit IsFPIntrinsic = 0;
 }

 let TargetPrefix = "riscv" in {
@@ -1442,14 +1443,15 @@ let TargetPrefix = "riscv" in {
   defm vwmaccus : RISCVTernaryWide;
   defm vwmaccsu : RISCVTernaryWide;

-  defm vfadd : RISCVBinaryAAXRoundingMode;
-  defm vfsub : RISCVBinaryAAXRoundingMode;
-  defm vfrsub : RISCVBinaryAAXRoundingMode;
-
-  defm vfwadd : RISCVBinaryABXRoundingMode;
-  defm vfwsub : RISCVBinaryABXRoundingMode;
-  defm vfwadd_w : RISCVBinaryAAXRoundingMode;
-  defm vfwsub_w : RISCVBinaryAAXRoundingMode;
+  let IsFPIntrinsic = 1 in {
+    defm vfadd : RISCVBinaryAAXRoundingMode;
+    defm vfsub : RISCVBinaryAAXRoundingMode;
+    defm vfrsub : RISCVBinaryAAXRoundingMode;
+    defm vfwadd : RISCVBinaryABXRoundingMode;
+    defm vfwsub : RISCVBinaryABXRoundingMode;
+    defm vfwadd_w : RISCVBinaryAAXRoundingMode;
+    defm vfwsub_w : RISCVBinaryAAXRoundingMode;
+  }

   defm vsaddu : RISCVSaturatingBinaryAAX;
   defm vsadd : RISCVSaturatingBinaryAAX;
@@ -1484,6 +1486,7 @@ let TargetPrefix = "riscv" in {
                                          llvm_anyint_ty],
                                         [IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
+    let IsFPIntrinsic = 1;
   }

   def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
@@ -1506,51 +1509,57 @@ let TargetPrefix = "riscv" in {
                                          llvm_anyint_ty],
                                         [IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
+    let IsFPIntrinsic = 1;
   }

-  defm vfmul : RISCVBinaryAAXRoundingMode;
-  defm vfdiv : RISCVBinaryAAXRoundingMode;
-  defm vfrdiv : RISCVBinaryAAXRoundingMode;
+  let IsFPIntrinsic = 1 in {
+    defm vfmul : RISCVBinaryAAXRoundingMode;
+    defm vfdiv : RISCVBinaryAAXRoundingMode;
+    defm vfrdiv : RISCVBinaryAAXRoundingMode;

-  defm vfwmul : RISCVBinaryABXRoundingMode;
+    defm vfwmul : RISCVBinaryABXRoundingMode;

-  defm vfmacc : RISCVTernaryAAXARoundingMode;
-  defm vfnmacc : RISCVTernaryAAXARoundingMode;
-  defm vfmsac : RISCVTernaryAAXARoundingMode;
-  defm vfnmsac : RISCVTernaryAAXARoundingMode;
-  defm vfmadd : RISCVTernaryAAXARoundingMode;
-  defm vfnmadd : RISCVTernaryAAXARoundingMode;
-  defm vfmsub : RISCVTernaryAAXARoundingMode;
-  defm vfnmsub : RISCVTernaryAAXARoundingMode;
+    defm vfmacc : RISCVTernaryAAXARoundingMode;
+    defm vfnmacc : RISCVTernaryAAXARoundingMode;
+    defm vfmsac : RISCVTernaryAAXARoundingMode;
+    defm vfnmsac : RISCVTernaryAAXARoundingMode;
+    defm vfmadd : RISCVTernaryAAXARoundingMode;
+    defm vfnmadd : RISCVTernaryAAXARoundingMode;
+    defm vfmsub : RISCVTernaryAAXARoundingMode;
+    defm vfnmsub : RISCVTernaryAAXARoundingMode;

-  defm vfwmacc : RISCVTernaryWideRoundingMode;
-  defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
-  defm vfwnmacc : RISCVTernaryWideRoundingMode;
-  defm vfwmsac : RISCVTernaryWideRoundingMode;
-  defm vfwnmsac : RISCVTernaryWideRoundingMode;
+    defm vfwmacc : RISCVTernaryWideRoundingMode;
+    defm vfwmaccbf16 : RISCVTernaryWideRoundingMode;
+    defm vfwnmacc : RISCVTernaryWideRoundingMode;
+    defm vfwmsac : RISCVTernaryWideRoundingMode;
+    defm vfwnmsac : RISCVTernaryWideRoundingMode;

-  defm vfsqrt : RISCVUnaryAARoundingMode;
-  defm vfrsqrt7 : RISCVUnaryAA;
-  defm vfrec7 : RISCVUnaryAARoundingMode;
+    defm vfsqrt : RISCVUnaryAARoundingMode;
+    defm vfrsqrt7 : RISCVUnaryAA;
+    defm vfrec7 : RISCVUnaryAARoundingMode;

-  defm vfmin : RISCVBinaryAAX;
-  defm vfmax : RISCVBinaryAAX;
+    defm vfmin : RISCVBinaryAAX;
+    defm vfmax : RISCVBinaryAAX;

-  defm vfsgnj : RISCVBinaryAAX;
-  defm vfsgnjn : RISCVBinaryAAX;
-  defm vfsgnjx : RISCVBinaryAAX;
+    defm vfsgnj : RISCVBinaryAAX;
+    defm vfsgnjn : RISCVBinaryAAX;
+    defm vfsgnjx : RISCVBinaryAAX;

-  defm vfclass : RISCVClassify;
+    defm vfclass : RISCVClassify;

-  defm vfmerge : RISCVBinaryWithV0;
+    defm vfmerge : RISCVBinaryWithV0;
+  }

   defm vslideup : RVVSlide;
   defm vslidedown : RVVSlide;

   defm vslide1up : RISCVBinaryAAX;
   defm vslide1down : RISCVBinaryAAX;
-  defm vfslide1up : RISCVBinaryAAX;
-  defm vfslide1down : RISCVBinaryAAX;
+
+  let IsFPIntrinsic = 1 in {
+    defm vfslide1up : RISCVBinaryAAX;
+    defm vfslide1down : RISCVBinaryAAX;
+  }

   defm vrgather_vv : RISCVRGatherVV;
   defm vrgather_vx : RISCVRGatherVX;
@@ -1571,12 +1580,14 @@ let TargetPrefix = "riscv" in {
   defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
   defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;

-  defm vmfeq : RISCVCompare;
-  defm vmfne : RISCVCompare;
-  defm vmflt : RISCVCompare;
-  defm vmfle : RISCVCompare;
-  defm vmfgt : RISCVCompare;
-  defm vmfge : RISCVCompare;
+  let IsFPIntrinsic = 1 in {
+    defm vmfeq : RISCVCompare;
+    defm vmfne : RISCVCompare;
+    defm vmflt : RISCVCompare;
+    defm vmfle : RISCVCompare;
+    defm vmfgt : RISCVCompare;
+    defm vmfge : RISCVCompare;
+  }

   defm vredsum : RISCVReduction;
   defm vredand : RISCVReduction;
@@ -1590,13 +1601,15 @@ let TargetPrefix = "riscv" in {
   defm vwredsumu : RISCVReduction;
   defm vwredsum : RISCVReduction;

-  defm vfredosum : RISCVReductionRoundingMode;
-  defm vfredusum : RISCVReductionRoundingMode;
-  defm vfredmin : RISCVReduction;
-  defm vfredmax : RISCVReduction;
+  let IsFPIntrinsic = 1 in {
+    defm vfredosum : RISCVReductionRoundingMode;
+    defm vfredusum : RISCVReductionRoundingMode;
+    defm vfredmin : RISCVReduction;
+    defm vfredmax : RISCVReduction;

-  defm vfwredusum : RISCVReductionRoundingMode;
-  defm vfwredosum : RISCVReductionRoundingMode;
+    defm vfwredusum : RISCVReductionRoundingMode;
+    defm vfwredosum : RISCVReductionRoundingMode;
+  }

   def int_riscv_vmand: RISCVBinaryAAAUnMasked;
   def int_riscv_vmnand: RISCVBinaryAAAUnMasked;
@@ -1615,31 +1628,33 @@ let TargetPrefix = "riscv" in {
   defm vmsof : RISCVMaskedUnaryMOut;
   defm vmsif : RISCVMaskedUnaryMOut;

-  defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
-  defm vfcvt_x_f_v : RISCVConversionRoundingMode;
-  defm vfcvt_rtz_xu_f_v : RISCVConversion;
-  defm vfcvt_rtz_x_f_v : RISCVConversion;
-  defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
-  defm vfcvt_f_x_v : RISCVConversionRoundingMode;
-
-  defm vfwcvt_f_xu_v : RISCVConversion;
-  defm vfwcvt_f_x_v : RISCVConversion;
-  defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
-  defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
-  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
-  defm vfwcvt_rtz_x_f_v : RISCVConversion;
-  defm vfwcvt_f_f_v : RISCVConversion;
-  defm vfwcvtbf16_f_f_v : RISCVConversion;
-
-  defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
-  defm vfncvt_f_x_w : RISCVConversionRoundingMode;
-  defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
-  defm vfncvt_x_f_w : RISCVConversionRoundingMode;
-  defm vfncvt_rtz_xu_f_w : RISCVConversion;
-  defm vfncvt_rtz_x_f_w : RISCVConversion;
-  defm vfncvt_f_f_w : RISCVConversionRoundingMode;
-  defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
-  defm vfncvt_rod_f_f_w : RISCVConversion;
+  let IsFPIntrinsic = 1 in {
+    defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
+    defm vfcvt_x_f_v : RISCVConversionRoundingMode;
+    defm vfcvt_rtz_xu_f_v : RISCVConversion;
+    defm vfcvt_rtz_x_f_v : RISCVConversion;
+    defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
+    defm vfcvt_f_x_v : RISCVConversionRoundingMode;
+
+    defm vfwcvt_f_xu_v : RISCVConversion;
+    defm vfwcvt_f_x_v : RISCVConversion;
+    defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
+    defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
+    defm vfwcvt_rtz_xu_f_v : RISCVConversion;
+    defm vfwcvt_rtz_x_f_v : RISCVConversion;
+    defm vfwcvt_f_f_v : RISCVConversion;
+    defm vfwcvtbf16_f_f_v : RISCVConversion;
+
+    defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
+    defm vfncvt_f_x_w : RISCVConversionRoundingMode;
+    defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
+    defm vfncvt_x_f_w : RISCVConversionRoundingMode;
+    defm vfncvt_rtz_xu_f_w : RISCVConversion;
+    defm vfncvt_rtz_x_f_w : RISCVConversion;
+    defm vfncvt_f_f_w : RISCVConversionRoundingMode;
+    defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode;
+    defm vfncvt_rod_f_f_w : RISCVConversion;
+  }

   // Output: (vector)
   // Input: (passthru, mask type input, vl)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 16f34a89a52ec..e5b7d23deb6c5 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -723,8 +723,29 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                            MachineInstr &MI) const {
   Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();

-  if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID))
+  if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+          RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
+    if (II->hasScalarOperand() && !II->IsFPIntrinsic) {
+      MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
+      MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
+
+      auto OldScalar = MI.getOperand(II->ScalarOperand + 2).getReg();
+      // Legalize integer vx form intrinsic.
+      if (MRI.getType(OldScalar).isScalar()) {
+        if (MRI.getType(OldScalar).getSizeInBits() < sXLen.getSizeInBits()) {
+          Helper.Observer.changingInstr(MI);
+          Helper.widenScalarSrc(MI, sXLen, II->ScalarOperand + 2,
+                                TargetOpcode::G_ANYEXT);
+          Helper.Observer.changedInstr(MI);
+        } else if (MRI.getType(OldScalar).getSizeInBits() >
+                   sXLen.getSizeInBits()) {
+          // TODO: i64 in riscv32.
+          return false;
+        }
+      }
+    }
     return true;
+  }

   switch (IntrinsicID) {
   default:
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index a082b18867666..597dd1271d394 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -500,6 +500,33 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     OpdsMapping[1] = GPRValueMapping;
     break;
   }
+  case TargetOpcode::G_INTRINSIC: {
+    Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
+
+    if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
+            RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) {
+      unsigned ScalarIdx = -1;
+      if (II->hasScalarOperand()) {
+        ScalarIdx = II->ScalarOperand + 2;
+      }
+      for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
+        const MachineOperand &MO = MI.getOperand(Idx);
+        if (!MO.isReg())
+          continue;
+        LLT Ty = MRI.getType(MO.getReg());
+        if (Ty.isVector()) {
+          OpdsMapping[Idx] =
+              getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue());
+        } else if (II->IsFPIntrinsic && ScalarIdx == Idx) {
+          // Choose the right FPR for the scalar operand of RVV intrinsics.
+ OpdsMapping[Idx] = getFPValueMapping(Ty.getSizeInBits()); + } else { + OpdsMapping[Idx] = GPRValueMapping; + } + } + } + break; + } default: // By default map all scalars to GPR. for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 4581c11356aff..3f81ed74c12ed 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -642,6 +642,7 @@ struct RISCVVIntrinsicInfo { unsigned IntrinsicID; uint8_t ScalarOperand; uint8_t VLOperand; + bool IsFPIntrinsic; bool hasScalarOperand() const { // 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td. return ScalarOperand != 0xF; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 03e6f43a38945..ecde628fc7e21 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -575,7 +575,7 @@ def RISCVVInversePseudosTable : GenericTable { def RISCVVIntrinsicsTable : GenericTable { let FilterClass = "RISCVVIntrinsic"; let CppTypeName = "RISCVVIntrinsicInfo"; - let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"]; + let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand", "IsFPIntrinsic"]; let PrimaryKey = ["IntrinsicID"]; let PrimaryKeyName = "getRISCVVIntrinsicInfo"; } diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll new file mode 100644 index 0000000000000..21f14d941993b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll @@ -0,0 +1,2443 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -global-isel \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -global-isel \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.nxv2i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i8.nxv2i8( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.nxv4i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i8.nxv4i8( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i8.nxv8i8( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.nxv16i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i8.nxv16i8( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.nxv32i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i8.nxv32i8( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.nxv64i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv64i8.nxv64i8( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.nxv1i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i16.nxv1i16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.nxv2i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i16.nxv2i16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.nxv4i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i16.nxv4i16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i16.nxv8i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i16.nxv8i16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.nxv16i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i16.nxv16i16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.nxv32i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i16.nxv32i16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.nxv2i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i32.nxv2i32( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i32.nxv4i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i32.nxv4i32( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i32.nxv8i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i32.nxv8i32( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.nxv16i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i32.nxv16i32( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i64.nxv2i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i64.nxv2i64( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i64.nxv4i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vadd.nxv4i64.nxv4i64( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i64.nxv8i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i64.nxv8i64( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.i8( + poison, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i8.i8( + poison, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i8.i8( + poison, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i8.i8( + poison, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i8.i8( + poison, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i8.i8( + poison, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 
%1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv64i8.i8( + poison, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i16.i16( + poison, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i16.i16( + poison, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i16.i16( + poison, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen 
%4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i16.i16( + poison, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i16.i16( + poison, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i16.i16( + poison, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.i32( + poison, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i32.i32( + poison, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i32.i32( + poison, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i32.i32( + poison, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i32.i32( + poison, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define 
@intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.i8( + poison, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i8.i8( + poison, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i8.i8( + poison, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i8.i8( + poison, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i8.i8( + poison, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i8.i8( + poison, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, -9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv64i8.i8( + poison, + %0, + i8 -9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i16.i16( + poison, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i16.i16( + poison, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( 
+ %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i16.i16( + poison, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i16.i16( + poison, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i16.i16( + poison, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i16.i16( + poison, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.i32( + poison, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, 
%1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i32.i32( + poison, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i32.i32( + poison, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i32.i32( + poison, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i32.i32( + poison, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll index 8a9cc150907dc..9e092e4337526 
100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll @@ -762,3 +762,753 @@ entry: ret %a } + +declare @llvm.riscv.vfadd.nxv1f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.f16( + poison, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2f16.f16( + poison, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4f16.f16( + poison, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { +; 
CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8f16.f16( + poison, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv16f16.f16( + poison, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv32f16.f16( + poison, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f32.f32( + 
poison, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2f32.f32( + poison, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4f32.f32( + poison, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8f32.f32( + poison, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv16f32.f32( + poison, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f64.f64( + poison, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2f64.f64( + poison, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfadd.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4f64.f64( + poison, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8f64.f64( + poison, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +}
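
Note on the generated tests above: for readability, here is what one case from each of the two test families looks like with its scalable-vector operand types spelled out in full. The `<vscale x N x ...>` types are inferred from the mangled names in the diff (nxv4i16, nxv1f16), so treat this as an illustrative sketch of the test shape rather than a verbatim copy of the checked-in files; the CHECK lines and operand order match the hunks above.

; vadd.vi form (immediate splat, from the vadd tests earlier in this diff)
define <vscale x 4 x i16> @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vadd.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadd.nxv4i16.i16(
    <vscale x 4 x i16> poison,
    <vscale x 4 x i16> %0,
    i16 9,
    iXLen %1)

  ret %a
}

; vfadd.vf form (scalar FP operand, from llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll);
; the two trailing iXLen operands are the static rounding mode (0 = rne, hence the
; fsrmi/fsrm pair in the CHECK lines) and the vector length.
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
    <vscale x 1 x half> poison,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret %a
}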