From 938ffdbe4d2df0b2351270a7d3a4e4367ea677ed Mon Sep 17 00:00:00 2001 From: Jianjian GUAN Date: Mon, 8 Sep 2025 11:43:24 +0800 Subject: [PATCH 1/3] [RISCV][GISel] Support selecting vx and vf form RVV intrinsics For the vx form, we legalize the scalar operand by widening it to XLEN. For the vf form, we select the correct register bank for the scalar operand. --- llvm/include/llvm/IR/IntrinsicsRISCV.td | 163 +- .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 21 +- .../RISCV/GISel/RISCVRegisterBankInfo.cpp | 28 + llvm/lib/Target/RISCV/RISCVISelLowering.h | 1 + .../Target/RISCV/RISCVInstrInfoVPseudos.td | 2 +- .../test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll | 2443 +++++++++++++++++ .../CodeGen/RISCV/GlobalISel/rvv/vfadd.ll | 750 +++++ 7 files changed, 3332 insertions(+), 76 deletions(-) create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td index 878f7b3194830..4d0debd399e5f 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -126,6 +126,7 @@ class RISCVVIntrinsic { Intrinsic IntrinsicID = !cast(NAME); bits<4> ScalarOperand = NoScalarOperand; bits<5> VLOperand = NoVLOperand; + bit IsFPIntrinsic = 0; } let TargetPrefix = "riscv" in { @@ -1442,14 +1443,15 @@ let TargetPrefix = "riscv" in { defm vwmaccus : RISCVTernaryWide; defm vwmaccsu : RISCVTernaryWide; - defm vfadd : RISCVBinaryAAXRoundingMode; - defm vfsub : RISCVBinaryAAXRoundingMode; - defm vfrsub : RISCVBinaryAAXRoundingMode; - - defm vfwadd : RISCVBinaryABXRoundingMode; - defm vfwsub : RISCVBinaryABXRoundingMode; - defm vfwadd_w : RISCVBinaryAAXRoundingMode; - defm vfwsub_w : RISCVBinaryAAXRoundingMode; + let IsFPIntrinsic = 1 in { + defm vfadd : RISCVBinaryAAXRoundingMode; + defm vfsub : RISCVBinaryAAXRoundingMode; + defm vfrsub : RISCVBinaryAAXRoundingMode; + defm vfwadd : RISCVBinaryABXRoundingMode; + defm vfwsub : RISCVBinaryABXRoundingMode; + defm vfwadd_w : RISCVBinaryAAXRoundingMode; + defm vfwsub_w : RISCVBinaryAAXRoundingMode; + } defm vsaddu : RISCVSaturatingBinaryAAX; defm vsadd : RISCVSaturatingBinaryAAX; @@ -1484,6 +1486,7 @@ let TargetPrefix = "riscv" in { llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 2; + let IsFPIntrinsic = 1; } def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>], @@ -1506,51 +1509,57 @@ let TargetPrefix = "riscv" in { llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 2; + let IsFPIntrinsic = 1; } - defm vfmul : RISCVBinaryAAXRoundingMode; - defm vfdiv : RISCVBinaryAAXRoundingMode; - defm vfrdiv : RISCVBinaryAAXRoundingMode; + let IsFPIntrinsic = 1 in { + defm vfmul : RISCVBinaryAAXRoundingMode; + defm vfdiv : RISCVBinaryAAXRoundingMode; + defm vfrdiv : RISCVBinaryAAXRoundingMode; - defm vfwmul : RISCVBinaryABXRoundingMode; + defm vfwmul : RISCVBinaryABXRoundingMode; - defm vfmacc : RISCVTernaryAAXARoundingMode; - defm vfnmacc : RISCVTernaryAAXARoundingMode; - defm vfmsac : RISCVTernaryAAXARoundingMode; - defm vfnmsac : RISCVTernaryAAXARoundingMode; - defm vfmadd : RISCVTernaryAAXARoundingMode; - defm vfnmadd : RISCVTernaryAAXARoundingMode; - defm vfmsub : RISCVTernaryAAXARoundingMode; - defm vfnmsub : RISCVTernaryAAXARoundingMode; + defm vfmacc : RISCVTernaryAAXARoundingMode; + defm vfnmacc : RISCVTernaryAAXARoundingMode; + defm vfmsac : RISCVTernaryAAXARoundingMode; + defm vfnmsac : RISCVTernaryAAXARoundingMode; + defm vfmadd : RISCVTernaryAAXARoundingMode; + defm vfnmadd : RISCVTernaryAAXARoundingMode; + defm vfmsub : RISCVTernaryAAXARoundingMode; + 
defm vfnmsub : RISCVTernaryAAXARoundingMode; - defm vfwmacc : RISCVTernaryWideRoundingMode; - defm vfwmaccbf16 : RISCVTernaryWideRoundingMode; - defm vfwnmacc : RISCVTernaryWideRoundingMode; - defm vfwmsac : RISCVTernaryWideRoundingMode; - defm vfwnmsac : RISCVTernaryWideRoundingMode; + defm vfwmacc : RISCVTernaryWideRoundingMode; + defm vfwmaccbf16 : RISCVTernaryWideRoundingMode; + defm vfwnmacc : RISCVTernaryWideRoundingMode; + defm vfwmsac : RISCVTernaryWideRoundingMode; + defm vfwnmsac : RISCVTernaryWideRoundingMode; - defm vfsqrt : RISCVUnaryAARoundingMode; - defm vfrsqrt7 : RISCVUnaryAA; - defm vfrec7 : RISCVUnaryAARoundingMode; + defm vfsqrt : RISCVUnaryAARoundingMode; + defm vfrsqrt7 : RISCVUnaryAA; + defm vfrec7 : RISCVUnaryAARoundingMode; - defm vfmin : RISCVBinaryAAX; - defm vfmax : RISCVBinaryAAX; + defm vfmin : RISCVBinaryAAX; + defm vfmax : RISCVBinaryAAX; - defm vfsgnj : RISCVBinaryAAX; - defm vfsgnjn : RISCVBinaryAAX; - defm vfsgnjx : RISCVBinaryAAX; + defm vfsgnj : RISCVBinaryAAX; + defm vfsgnjn : RISCVBinaryAAX; + defm vfsgnjx : RISCVBinaryAAX; - defm vfclass : RISCVClassify; + defm vfclass : RISCVClassify; - defm vfmerge : RISCVBinaryWithV0; + defm vfmerge : RISCVBinaryWithV0; + } defm vslideup : RVVSlide; defm vslidedown : RVVSlide; defm vslide1up : RISCVBinaryAAX; defm vslide1down : RISCVBinaryAAX; - defm vfslide1up : RISCVBinaryAAX; - defm vfslide1down : RISCVBinaryAAX; + + let IsFPIntrinsic = 1 in { + defm vfslide1up : RISCVBinaryAAX; + defm vfslide1down : RISCVBinaryAAX; + } defm vrgather_vv : RISCVRGatherVV; defm vrgather_vx : RISCVRGatherVX; @@ -1571,12 +1580,14 @@ let TargetPrefix = "riscv" in { defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode; defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode; - defm vmfeq : RISCVCompare; - defm vmfne : RISCVCompare; - defm vmflt : RISCVCompare; - defm vmfle : RISCVCompare; - defm vmfgt : RISCVCompare; - defm vmfge : RISCVCompare; + let IsFPIntrinsic = 1 in { + defm vmfeq : RISCVCompare; + defm vmfne : RISCVCompare; + defm vmflt : RISCVCompare; + defm vmfle : RISCVCompare; + defm vmfgt : RISCVCompare; + defm vmfge : RISCVCompare; + } defm vredsum : RISCVReduction; defm vredand : RISCVReduction; @@ -1590,13 +1601,15 @@ let TargetPrefix = "riscv" in { defm vwredsumu : RISCVReduction; defm vwredsum : RISCVReduction; - defm vfredosum : RISCVReductionRoundingMode; - defm vfredusum : RISCVReductionRoundingMode; - defm vfredmin : RISCVReduction; - defm vfredmax : RISCVReduction; + let IsFPIntrinsic = 1 in { + defm vfredosum : RISCVReductionRoundingMode; + defm vfredusum : RISCVReductionRoundingMode; + defm vfredmin : RISCVReduction; + defm vfredmax : RISCVReduction; - defm vfwredusum : RISCVReductionRoundingMode; - defm vfwredosum : RISCVReductionRoundingMode; + defm vfwredusum : RISCVReductionRoundingMode; + defm vfwredosum : RISCVReductionRoundingMode; + } def int_riscv_vmand: RISCVBinaryAAAUnMasked; def int_riscv_vmnand: RISCVBinaryAAAUnMasked; @@ -1615,31 +1628,33 @@ let TargetPrefix = "riscv" in { defm vmsof : RISCVMaskedUnaryMOut; defm vmsif : RISCVMaskedUnaryMOut; - defm vfcvt_xu_f_v : RISCVConversionRoundingMode; - defm vfcvt_x_f_v : RISCVConversionRoundingMode; - defm vfcvt_rtz_xu_f_v : RISCVConversion; - defm vfcvt_rtz_x_f_v : RISCVConversion; - defm vfcvt_f_xu_v : RISCVConversionRoundingMode; - defm vfcvt_f_x_v : RISCVConversionRoundingMode; - - defm vfwcvt_f_xu_v : RISCVConversion; - defm vfwcvt_f_x_v : RISCVConversion; - defm vfwcvt_xu_f_v : RISCVConversionRoundingMode; - defm vfwcvt_x_f_v : 
RISCVConversionRoundingMode; - defm vfwcvt_rtz_xu_f_v : RISCVConversion; - defm vfwcvt_rtz_x_f_v : RISCVConversion; - defm vfwcvt_f_f_v : RISCVConversion; - defm vfwcvtbf16_f_f_v : RISCVConversion; - - defm vfncvt_f_xu_w : RISCVConversionRoundingMode; - defm vfncvt_f_x_w : RISCVConversionRoundingMode; - defm vfncvt_xu_f_w : RISCVConversionRoundingMode; - defm vfncvt_x_f_w : RISCVConversionRoundingMode; - defm vfncvt_rtz_xu_f_w : RISCVConversion; - defm vfncvt_rtz_x_f_w : RISCVConversion; - defm vfncvt_f_f_w : RISCVConversionRoundingMode; - defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode; - defm vfncvt_rod_f_f_w : RISCVConversion; + let IsFPIntrinsic = 1 in { + defm vfcvt_xu_f_v : RISCVConversionRoundingMode; + defm vfcvt_x_f_v : RISCVConversionRoundingMode; + defm vfcvt_rtz_xu_f_v : RISCVConversion; + defm vfcvt_rtz_x_f_v : RISCVConversion; + defm vfcvt_f_xu_v : RISCVConversionRoundingMode; + defm vfcvt_f_x_v : RISCVConversionRoundingMode; + + defm vfwcvt_f_xu_v : RISCVConversion; + defm vfwcvt_f_x_v : RISCVConversion; + defm vfwcvt_xu_f_v : RISCVConversionRoundingMode; + defm vfwcvt_x_f_v : RISCVConversionRoundingMode; + defm vfwcvt_rtz_xu_f_v : RISCVConversion; + defm vfwcvt_rtz_x_f_v : RISCVConversion; + defm vfwcvt_f_f_v : RISCVConversion; + defm vfwcvtbf16_f_f_v : RISCVConversion; + + defm vfncvt_f_xu_w : RISCVConversionRoundingMode; + defm vfncvt_f_x_w : RISCVConversionRoundingMode; + defm vfncvt_xu_f_w : RISCVConversionRoundingMode; + defm vfncvt_x_f_w : RISCVConversionRoundingMode; + defm vfncvt_rtz_xu_f_w : RISCVConversion; + defm vfncvt_rtz_x_f_w : RISCVConversion; + defm vfncvt_f_f_w : RISCVConversionRoundingMode; + defm vfncvtbf16_f_f_w : RISCVConversionRoundingMode; + defm vfncvt_rod_f_f_w : RISCVConversion; + } // Output: (vector) // Input: (passthru, mask type input, vl) diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp index 16f34a89a52ec..26d47e1ce8d48 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp @@ -723,8 +723,27 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const { Intrinsic::ID IntrinsicID = cast(MI).getIntrinsicID(); - if (RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) + if (auto *II = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) { + if (II->hasScalarOperand() && !II->IsFPIntrinsic) { + MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; + MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); + + auto OldScalar = MI.getOperand(II->ScalarOperand + 2).getReg(); + // Legalize integer vx form intrinsic. + if (MRI.getType(OldScalar).isScalar()) { + if (MRI.getType(OldScalar).getSizeInBits() < sXLen.getSizeInBits()) { + Helper.Observer.changingInstr(MI); + Helper.widenScalarSrc(MI, sXLen, II->ScalarOperand + 2, + TargetOpcode::G_ANYEXT); + Helper.Observer.changedInstr(MI); + } else if (MRI.getType(OldScalar).getSizeInBits() > + sXLen.getSizeInBits()) { + // TODO: i64 in riscv32. 
+ } } } return true; + } switch (IntrinsicID) { default: diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp index a082b18867666..16d6c9a5652d3 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp @@ -500,6 +500,34 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { OpdsMapping[1] = GPRValueMapping; break; } + case TargetOpcode::G_INTRINSIC: { + Intrinsic::ID IntrinsicID = cast(MI).getIntrinsicID(); + + if (auto *II = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) { + unsigned ScalarIdx = -1; + if (II->hasScalarOperand()) { + ScalarIdx = II->ScalarOperand + 2; + } + for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { + auto &MO = MI.getOperand(Idx); + if (!MO.isReg() || !MO.getReg()) + continue; + LLT Ty = MRI.getType(MO.getReg()); + if (!Ty.isValid()) + continue; + + if (Ty.isVector()) + OpdsMapping[Idx] = + getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue()); + // Choose the right FPR for the scalar operand of RVV intrinsics. + else if (II->IsFPIntrinsic && ScalarIdx == Idx) + OpdsMapping[Idx] = getFPValueMapping(Ty.getSizeInBits()); + else + OpdsMapping[Idx] = GPRValueMapping; + } + } + break; + } default: // By default map all scalars to GPR. for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 4581c11356aff..3f81ed74c12ed 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -642,6 +642,7 @@ struct RISCVVIntrinsicInfo { unsigned IntrinsicID; uint8_t ScalarOperand; uint8_t VLOperand; + bool IsFPIntrinsic; bool hasScalarOperand() const { // 0xF is not valid. See NoScalarOperand in IntrinsicsRISCV.td. 
return ScalarOperand != 0xF; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 03e6f43a38945..ecde628fc7e21 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -575,7 +575,7 @@ def RISCVVInversePseudosTable : GenericTable { def RISCVVIntrinsicsTable : GenericTable { let FilterClass = "RISCVVIntrinsic"; let CppTypeName = "RISCVVIntrinsicInfo"; - let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand"]; + let Fields = ["IntrinsicID", "ScalarOperand", "VLOperand", "IsFPIntrinsic"]; let PrimaryKey = ["IntrinsicID"]; let PrimaryKeyName = "getRISCVVIntrinsicInfo"; } diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll new file mode 100644 index 0000000000000..56616c286b6d8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll @@ -0,0 +1,2443 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -global-isel \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -global-isel \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vadd.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.nxv1i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.nxv2i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i8.nxv2i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.nxv4i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i8.nxv4i8( + undef, 
+ %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.nxv16i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i8_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.nxv32i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i8_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.nxv64i8( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.nxv1i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i16_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.nxv2i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i16_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.nxv4i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i16_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + 
+declare @llvm.riscv.vadd.nxv8i16.nxv8i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i16_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.nxv16i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i16_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.nxv32i16( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vadd_mask_vv_nxv1i32_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.nxv2i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i32_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i32.nxv4i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i32_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i32.nxv8i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i32_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.nxv16i32( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen 
%2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i64.nxv1i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i64_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i64.nxv2i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv2i64_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i64.nxv4i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv4i64_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i64.nxv8i64( + , + , + , + iXLen); + +define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i8.i8( + , 
+ , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv64i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv64i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + 
%3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a1, e16, m2, ta, mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv32i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv1i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv2i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define 
@intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vadd.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv4i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vadd.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv8i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vadd.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vadd.nxv16i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vadd_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vadd.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, iXLen); + +define @intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vx_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vadd.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i8.i8( + undef, + %0, + i8 9, + iXLen %1) + + ret %a +} + 
+define @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i8.i8( + undef, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i8.i8( + undef, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i8.i8( + undef, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i8.i8( + undef, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e8, m4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i8.i8( + undef, + %0, + i8 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, -9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv64i8.i8( + undef, + %0, + i8 -9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv64i8.i8( + %0, + %1, + i8 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i16.i16( + undef, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i16.i16( + undef, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i16.i16( + undef, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i16.i16( + %0, + 
%1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i16.i16( + undef, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i16.i16( + undef, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv32i16.i16( + undef, + %0, + i16 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv32i16.i16( + %0, + %1, + i16 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.i32( + undef, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv1i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv2i32.i32( + undef, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vadd.vi v8, v9, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv2i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv4i32.i32( + undef, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vadd.vi v8, v10, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv4i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv8i32.i32( + undef, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vadd.vi v8, v12, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv8i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vadd_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vadd.vi v8, v8, 9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv16i32.i32( + undef, + %0, + i32 9, + iXLen %1) + + ret %a +} + +define @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vadd.vi v8, v16, -9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.mask.nxv16i32.i32( + %0, + %1, + i32 -9, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll index 8a9cc150907dc..af5af162e78a8 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll @@ -762,3 +762,753 @@ entry: ret %a } + +declare @llvm.riscv.vfadd.nxv1f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.f16( + undef, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2f16.f16( + undef, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4f16.f16( + undef, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8f16.f16( + undef, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv16f16.f16( + undef, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32f16.f16( + , + , + half, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( %0, half %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv32f16.f16( + undef, + %0, + half %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32f16.f16( + , + , + half, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f32.f32( + undef, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( %0, float %1, 
iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2f32.f32( + undef, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4f32.f32( + undef, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8f32.f32( + undef, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16f32.f32( + , + , + float, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( %0, float %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfadd.nxv16f32.f32( + undef, + %0, + float %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16f32.f32( + , + , + float, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f64.f64( + undef, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2f64.f64( + undef, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4f64.f64( + undef, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8f64.f64( + , + , + double, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( %0, double %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8f64.f64( + undef, + %0, + double %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8f64.f64( + , + , + double, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} From 3845e027a28a5c5a1c7f7b42dad74f6f54392c33 Mon Sep 17 00:00:00 2001 From: Jianjian GUAN Date: Wed, 17 Sep 2025 18:02:19 +0800 Subject: [PATCH 2/3] Address comment --- .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 3 +- .../RISCV/GISel/RISCVRegisterBankInfo.cpp | 19 ++- .../test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll | 116 +++++++++--------- .../CodeGen/RISCV/GlobalISel/rvv/vfadd.ll | 30 ++--- 4 files changed, 84 insertions(+), 84 deletions(-) diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp index 26d47e1ce8d48..feb73e1d8f2a3 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp @@ -723,7 +723,8 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper, MachineInstr &MI) const { Intrinsic::ID IntrinsicID = cast(MI).getIntrinsicID(); - if (auto *II = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) { + if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = + RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) { if (II->hasScalarOperand() && !II->IsFPIntrinsic) { MachineIRBuilder &MIRBuilder = Helper.MIRBuilder; MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp index 16d6c9a5652d3..597dd1271d394 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp @@ -503,27 +503,26 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { case TargetOpcode::G_INTRINSIC: { Intrinsic::ID IntrinsicID = cast(MI).getIntrinsicID(); - if (auto *II = RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) { + if (const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II = + RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntrinsicID)) { unsigned ScalarIdx = -1; if (II->hasScalarOperand()) { ScalarIdx = II->ScalarOperand + 2; } for (unsigned Idx = 0; Idx < NumOperands; ++Idx) { - auto &MO = 
MI.getOperand(Idx); - if (!MO.isReg() || !MO.getReg()) + const MachineOperand &MO = MI.getOperand(Idx); + if (!MO.isReg()) continue; LLT Ty = MRI.getType(MO.getReg()); - if (!Ty.isValid()) - continue; - - if (Ty.isVector()) + if (Ty.isVector()) { OpdsMapping[Idx] = getVRBValueMapping(Ty.getSizeInBits().getKnownMinValue()); - // Chose the right FPR for scalar operand of RVV intrinsics. - else if (II->IsFPIntrinsic && ScalarIdx == Idx) + } else if (II->IsFPIntrinsic && ScalarIdx == Idx) { + // Chose the right FPR for scalar operand of RVV intrinsics. OpdsMapping[Idx] = getFPValueMapping(Ty.getSizeInBits()); - else + } else { OpdsMapping[Idx] = GPRValueMapping; + } } } break; diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll index 56616c286b6d8..21f14d941993b 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vadd.ll @@ -18,7 +18,7 @@ define @intrinsic_vadd_vv_nxv1i8_nxv1i8_nxv1i8( @llvm.riscv.vadd.nxv1i8.nxv1i8( - undef, + poison, %0, %1, iXLen %2) @@ -64,7 +64,7 @@ define @intrinsic_vadd_vv_nxv2i8_nxv2i8_nxv2i8( @llvm.riscv.vadd.nxv2i8.nxv2i8( - undef, + poison, %0, %1, iXLen %2) @@ -110,7 +110,7 @@ define @intrinsic_vadd_vv_nxv4i8_nxv4i8_nxv4i8( @llvm.riscv.vadd.nxv4i8.nxv4i8( - undef, + poison, %0, %1, iXLen %2) @@ -156,7 +156,7 @@ define @intrinsic_vadd_vv_nxv8i8_nxv8i8_nxv8i8( @llvm.riscv.vadd.nxv8i8.nxv8i8( - undef, + poison, %0, %1, iXLen %2) @@ -202,7 +202,7 @@ define @intrinsic_vadd_vv_nxv16i8_nxv16i8_nxv16i8( @llvm.riscv.vadd.nxv16i8.nxv16i8( - undef, + poison, %0, %1, iXLen %2) @@ -248,7 +248,7 @@ define @intrinsic_vadd_vv_nxv32i8_nxv32i8_nxv32i8( @llvm.riscv.vadd.nxv32i8.nxv32i8( - undef, + poison, %0, %1, iXLen %2) @@ -294,7 +294,7 @@ define @intrinsic_vadd_vv_nxv64i8_nxv64i8_nxv64i8( @llvm.riscv.vadd.nxv64i8.nxv64i8( - undef, + poison, %0, %1, iXLen %2) @@ -341,7 +341,7 @@ define @intrinsic_vadd_vv_nxv1i16_nxv1i16_nxv1i16( @llvm.riscv.vadd.nxv1i16.nxv1i16( - undef, + poison, %0, %1, iXLen %2) @@ -387,7 +387,7 @@ define @intrinsic_vadd_vv_nxv2i16_nxv2i16_nxv2i16( @llvm.riscv.vadd.nxv2i16.nxv2i16( - undef, + poison, %0, %1, iXLen %2) @@ -433,7 +433,7 @@ define @intrinsic_vadd_vv_nxv4i16_nxv4i16_nxv4i16( @llvm.riscv.vadd.nxv4i16.nxv4i16( - undef, + poison, %0, %1, iXLen %2) @@ -479,7 +479,7 @@ define @intrinsic_vadd_vv_nxv8i16_nxv8i16_nxv8i16( @llvm.riscv.vadd.nxv8i16.nxv8i16( - undef, + poison, %0, %1, iXLen %2) @@ -525,7 +525,7 @@ define @intrinsic_vadd_vv_nxv16i16_nxv16i16_nxv16i16( @llvm.riscv.vadd.nxv16i16.nxv16i16( - undef, + poison, %0, %1, iXLen %2) @@ -571,7 +571,7 @@ define @intrinsic_vadd_vv_nxv32i16_nxv32i16_nxv32i16( @llvm.riscv.vadd.nxv32i16.nxv32i16( - undef, + poison, %0, %1, iXLen %2) @@ -618,7 +618,7 @@ define @intrinsic_vadd_vv_nxv1i32_nxv1i32_nxv1i32( @llvm.riscv.vadd.nxv1i32.nxv1i32( - undef, + poison, %0, %1, iXLen %2) @@ -664,7 +664,7 @@ define @intrinsic_vadd_vv_nxv2i32_nxv2i32_nxv2i32( @llvm.riscv.vadd.nxv2i32.nxv2i32( - undef, + poison, %0, %1, iXLen %2) @@ -710,7 +710,7 @@ define @intrinsic_vadd_vv_nxv4i32_nxv4i32_nxv4i32( @llvm.riscv.vadd.nxv4i32.nxv4i32( - undef, + poison, %0, %1, iXLen %2) @@ -756,7 +756,7 @@ define @intrinsic_vadd_vv_nxv8i32_nxv8i32_nxv8i32( @llvm.riscv.vadd.nxv8i32.nxv8i32( - undef, + poison, %0, %1, iXLen %2) @@ -802,7 +802,7 @@ define @intrinsic_vadd_vv_nxv16i32_nxv16i32_nxv16i32( @llvm.riscv.vadd.nxv16i32.nxv16i32( - undef, + poison, %0, %1, iXLen %2) @@ -849,7 +849,7 @@ define 
@intrinsic_vadd_vv_nxv1i64_nxv1i64_nxv1i64( @llvm.riscv.vadd.nxv1i64.nxv1i64( - undef, + poison, %0, %1, iXLen %2) @@ -895,7 +895,7 @@ define @intrinsic_vadd_vv_nxv2i64_nxv2i64_nxv2i64( @llvm.riscv.vadd.nxv2i64.nxv2i64( - undef, + poison, %0, %1, iXLen %2) @@ -941,7 +941,7 @@ define @intrinsic_vadd_vv_nxv4i64_nxv4i64_nxv4i64( @llvm.riscv.vadd.nxv4i64.nxv4i64( - undef, + poison, %0, %1, iXLen %2) @@ -987,7 +987,7 @@ define @intrinsic_vadd_vv_nxv8i64_nxv8i64_nxv8i64( @llvm.riscv.vadd.nxv8i64.nxv8i64( - undef, + poison, %0, %1, iXLen %2) @@ -1034,7 +1034,7 @@ define @intrinsic_vadd_vx_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv1i8.i8( - undef, + poison, %0, i8 %1, iXLen %2) @@ -1080,7 +1080,7 @@ define @intrinsic_vadd_vx_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv2i8.i8( - undef, + poison, %0, i8 %1, iXLen %2) @@ -1126,7 +1126,7 @@ define @intrinsic_vadd_vx_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv4i8.i8( - undef, + poison, %0, i8 %1, iXLen %2) @@ -1172,7 +1172,7 @@ define @intrinsic_vadd_vx_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv8i8.i8( - undef, + poison, %0, i8 %1, iXLen %2) @@ -1218,7 +1218,7 @@ define @intrinsic_vadd_vx_nxv16i8_nxv16i8_i8( @llvm.riscv.vadd.nxv16i8.i8( - undef, + poison, %0, i8 %1, iXLen %2) @@ -1264,7 +1264,7 @@ define @intrinsic_vadd_vx_nxv32i8_nxv32i8_i8( @llvm.riscv.vadd.nxv32i8.i8( - undef, + poison, %0, i8 %1, iXLen %2) @@ -1310,7 +1310,7 @@ define @intrinsic_vadd_vx_nxv64i8_nxv64i8_i8( @llvm.riscv.vadd.nxv64i8.i8( - undef, + poison, %0, i8 %1, iXLen %2) @@ -1356,7 +1356,7 @@ define @intrinsic_vadd_vx_nxv1i16_nxv1i16_i16( @llvm.riscv.vadd.nxv1i16.i16( - undef, + poison, %0, i16 %1, iXLen %2) @@ -1402,7 +1402,7 @@ define @intrinsic_vadd_vx_nxv2i16_nxv2i16_i16( @llvm.riscv.vadd.nxv2i16.i16( - undef, + poison, %0, i16 %1, iXLen %2) @@ -1448,7 +1448,7 @@ define @intrinsic_vadd_vx_nxv4i16_nxv4i16_i16( @llvm.riscv.vadd.nxv4i16.i16( - undef, + poison, %0, i16 %1, iXLen %2) @@ -1494,7 +1494,7 @@ define @intrinsic_vadd_vx_nxv8i16_nxv8i16_i16( @llvm.riscv.vadd.nxv8i16.i16( - undef, + poison, %0, i16 %1, iXLen %2) @@ -1540,7 +1540,7 @@ define @intrinsic_vadd_vx_nxv16i16_nxv16i16_i16( @llvm.riscv.vadd.nxv16i16.i16( - undef, + poison, %0, i16 %1, iXLen %2) @@ -1586,7 +1586,7 @@ define @intrinsic_vadd_vx_nxv32i16_nxv32i16_i16( @llvm.riscv.vadd.nxv32i16.i16( - undef, + poison, %0, i16 %1, iXLen %2) @@ -1632,7 +1632,7 @@ define @intrinsic_vadd_vx_nxv1i32_nxv1i32_i32( @llvm.riscv.vadd.nxv1i32.i32( - undef, + poison, %0, i32 %1, iXLen %2) @@ -1678,7 +1678,7 @@ define @intrinsic_vadd_vx_nxv2i32_nxv2i32_i32( @llvm.riscv.vadd.nxv2i32.i32( - undef, + poison, %0, i32 %1, iXLen %2) @@ -1724,7 +1724,7 @@ define @intrinsic_vadd_vx_nxv4i32_nxv4i32_i32( @llvm.riscv.vadd.nxv4i32.i32( - undef, + poison, %0, i32 %1, iXLen %2) @@ -1770,7 +1770,7 @@ define @intrinsic_vadd_vx_nxv8i32_nxv8i32_i32( @llvm.riscv.vadd.nxv8i32.i32( - undef, + poison, %0, i32 %1, iXLen %2) @@ -1816,7 +1816,7 @@ define @intrinsic_vadd_vx_nxv16i32_nxv16i32_i32( @llvm.riscv.vadd.nxv16i32.i32( - undef, + poison, %0, i32 %1, iXLen %2) @@ -1856,7 +1856,7 @@ define @intrinsic_vadd_vi_nxv1i8_nxv1i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv1i8.i8( - undef, + poison, %0, i8 9, iXLen %1) @@ -1889,7 +1889,7 @@ define @intrinsic_vadd_vi_nxv2i8_nxv2i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv2i8.i8( - undef, + poison, %0, i8 9, iXLen %1) @@ -1922,7 +1922,7 @@ define 
@intrinsic_vadd_vi_nxv4i8_nxv4i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv4i8.i8( - undef, + poison, %0, i8 9, iXLen %1) @@ -1955,7 +1955,7 @@ define @intrinsic_vadd_vi_nxv8i8_nxv8i8_i8( % ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vadd.nxv8i8.i8( - undef, + poison, %0, i8 9, iXLen %1) @@ -1988,7 +1988,7 @@ define @intrinsic_vadd_vi_nxv16i8_nxv16i8_i8( @llvm.riscv.vadd.nxv16i8.i8( - undef, + poison, %0, i8 9, iXLen %1) @@ -2021,7 +2021,7 @@ define @intrinsic_vadd_vi_nxv32i8_nxv32i8_i8( @llvm.riscv.vadd.nxv32i8.i8( - undef, + poison, %0, i8 9, iXLen %1) @@ -2054,7 +2054,7 @@ define @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8( @llvm.riscv.vadd.nxv64i8.i8( - undef, + poison, %0, i8 -9, iXLen %1) @@ -2087,7 +2087,7 @@ define @intrinsic_vadd_vi_nxv1i16_nxv1i16_i16( @llvm.riscv.vadd.nxv1i16.i16( - undef, + poison, %0, i16 9, iXLen %1) @@ -2120,7 +2120,7 @@ define @intrinsic_vadd_vi_nxv2i16_nxv2i16_i16( @llvm.riscv.vadd.nxv2i16.i16( - undef, + poison, %0, i16 9, iXLen %1) @@ -2153,7 +2153,7 @@ define @intrinsic_vadd_vi_nxv4i16_nxv4i16_i16( @llvm.riscv.vadd.nxv4i16.i16( - undef, + poison, %0, i16 9, iXLen %1) @@ -2186,7 +2186,7 @@ define @intrinsic_vadd_vi_nxv8i16_nxv8i16_i16( @llvm.riscv.vadd.nxv8i16.i16( - undef, + poison, %0, i16 9, iXLen %1) @@ -2219,7 +2219,7 @@ define @intrinsic_vadd_vi_nxv16i16_nxv16i16_i16( @llvm.riscv.vadd.nxv16i16.i16( - undef, + poison, %0, i16 9, iXLen %1) @@ -2252,7 +2252,7 @@ define @intrinsic_vadd_vi_nxv32i16_nxv32i16_i16( @llvm.riscv.vadd.nxv32i16.i16( - undef, + poison, %0, i16 9, iXLen %1) @@ -2285,7 +2285,7 @@ define @intrinsic_vadd_vi_nxv1i32_nxv1i32_i32( @llvm.riscv.vadd.nxv1i32.i32( - undef, + poison, %0, i32 9, iXLen %1) @@ -2318,7 +2318,7 @@ define @intrinsic_vadd_vi_nxv2i32_nxv2i32_i32( @llvm.riscv.vadd.nxv2i32.i32( - undef, + poison, %0, i32 9, iXLen %1) @@ -2351,7 +2351,7 @@ define @intrinsic_vadd_vi_nxv4i32_nxv4i32_i32( @llvm.riscv.vadd.nxv4i32.i32( - undef, + poison, %0, i32 9, iXLen %1) @@ -2384,7 +2384,7 @@ define @intrinsic_vadd_vi_nxv8i32_nxv8i32_i32( @llvm.riscv.vadd.nxv8i32.i32( - undef, + poison, %0, i32 9, iXLen %1) @@ -2417,7 +2417,7 @@ define @intrinsic_vadd_vi_nxv16i32_nxv16i32_i32( @llvm.riscv.vadd.nxv16i32.i32( - undef, + poison, %0, i32 9, iXLen %1) diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll index af5af162e78a8..9e092e4337526 100644 --- a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vfadd.ll @@ -779,7 +779,7 @@ define @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16( @llvm.riscv.vfadd.nxv1f16.f16( - undef, + poison, %0, half %1, iXLen 0, iXLen %2) @@ -829,7 +829,7 @@ define @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16( @llvm.riscv.vfadd.nxv2f16.f16( - undef, + poison, %0, half %1, iXLen 0, iXLen %2) @@ -879,7 +879,7 @@ define @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16( @llvm.riscv.vfadd.nxv4f16.f16( - undef, + poison, %0, half %1, iXLen 0, iXLen %2) @@ -929,7 +929,7 @@ define @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16( @llvm.riscv.vfadd.nxv8f16.f16( - undef, + poison, %0, half %1, iXLen 0, iXLen %2) @@ -979,7 +979,7 @@ define @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16( @llvm.riscv.vfadd.nxv16f16.f16( - undef, + poison, %0, half %1, iXLen 0, iXLen %2) @@ -1029,7 +1029,7 @@ define @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16( @llvm.riscv.vfadd.nxv32f16.f16( - undef, + poison, %0, half %1, iXLen 0, iXLen %2) @@ -1079,7 +1079,7 @@ define @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32( @llvm.riscv.vfadd.nxv1f32.f32( - undef, + poison, %0, 
float %1, iXLen 0, iXLen %2) @@ -1129,7 +1129,7 @@ define @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32( @llvm.riscv.vfadd.nxv2f32.f32( - undef, + poison, %0, float %1, iXLen 0, iXLen %2) @@ -1179,7 +1179,7 @@ define @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32( @llvm.riscv.vfadd.nxv4f32.f32( - undef, + poison, %0, float %1, iXLen 0, iXLen %2) @@ -1229,7 +1229,7 @@ define @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32( @llvm.riscv.vfadd.nxv8f32.f32( - undef, + poison, %0, float %1, iXLen 0, iXLen %2) @@ -1279,7 +1279,7 @@ define @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32( @llvm.riscv.vfadd.nxv16f32.f32( - undef, + poison, %0, float %1, iXLen 0, iXLen %2) @@ -1329,7 +1329,7 @@ define @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64( @llvm.riscv.vfadd.nxv1f64.f64( - undef, + poison, %0, double %1, iXLen 0, iXLen %2) @@ -1379,7 +1379,7 @@ define @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64( @llvm.riscv.vfadd.nxv2f64.f64( - undef, + poison, %0, double %1, iXLen 0, iXLen %2) @@ -1429,7 +1429,7 @@ define @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64( @llvm.riscv.vfadd.nxv4f64.f64( - undef, + poison, %0, double %1, iXLen 0, iXLen %2) @@ -1479,7 +1479,7 @@ define @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64( @llvm.riscv.vfadd.nxv8f64.f64( - undef, + poison, %0, double %1, iXLen 0, iXLen %2) From 808109a5e86fbfbaa7d57829b9d9a5ed4c1e0a40 Mon Sep 17 00:00:00 2001 From: Jianjian GUAN Date: Thu, 18 Sep 2025 14:36:26 +0800 Subject: [PATCH 3/3] Address comment --- llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp index feb73e1d8f2a3..e5b7d23deb6c5 100644 --- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp +++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp @@ -740,6 +740,7 @@ bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper, } else if (MRI.getType(OldScalar).getSizeInBits() > sXLen.getSizeInBits()) { // TODO: i64 in riscv32. + return false; } } }
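
Note on the operand-mapping logic exercised by the tests above: the patch legalizes the vx form by widening the integer scalar operand to XLen, and for the vf form it keeps the scalar in a floating-point register by picking the FPR bank in RISCVRegisterBankInfo::getInstrMapping. The following is a minimal, self-contained sketch of that bank-selection decision, not the actual LLVM API; every type and name below (Bank, OperandInfo, IntrinsicDesc, chooseBank) is invented for illustration, and the real code instead calls getVRBValueMapping, getFPValueMapping, and GPRValueMapping with the operand's LLT size.

#include <iostream>

enum class Bank { GPR, FPR, VRB };

struct OperandInfo {
  bool IsVector;        // scalable-vector operand
  unsigned SizeInBits;  // scalar width, or known-min width for vectors
                        // (the real code feeds this to get*ValueMapping)
};

struct IntrinsicDesc {
  bool IsFPIntrinsic;   // vf form (FP scalar) vs. vx form (integer scalar)
  int ScalarOperandIdx; // -1 if the intrinsic has no scalar operand
};

// Simplified model of the per-operand loop added in RISCVRegisterBankInfo.cpp:
// vector operands go to the vector bank, the scalar operand of an FP intrinsic
// goes to an FPR bank, and everything else (vx scalar already widened to XLen,
// VL, policy) goes to the GPR bank.
Bank chooseBank(const IntrinsicDesc &II, const OperandInfo &Op, int Idx) {
  if (Op.IsVector)
    return Bank::VRB;
  if (II.IsFPIntrinsic && Idx == II.ScalarOperandIdx)
    return Bank::FPR;
  return Bank::GPR;
}

int main() {
  IntrinsicDesc VFAdd{/*IsFPIntrinsic=*/true, /*ScalarOperandIdx=*/2};
  OperandInfo VecOp{true, 64};    // e.g. a <vscale x 1 x double> operand
  OperandInfo FPScalar{false, 64}; // the fa0 scalar of vfadd.vf
  OperandInfo VLOp{false, 64};     // XLen-sized VL operand

  std::cout << int(chooseBank(VFAdd, VecOp, 1)) << "\n";    // 2 (VRB)
  std::cout << int(chooseBank(VFAdd, FPScalar, 2)) << "\n"; // 1 (FPR)
  std::cout << int(chooseBank(VFAdd, VLOp, 3)) << "\n";     // 0 (GPR)
  return 0;
}

Under this model, the vfadd.vf checks above land the scalar in fa0 (FPR bank), while the vadd.vx checks keep it in a0 (GPR bank) after the widen-scalar legalization; this is only a sketch of the intended behaviour under the assumptions stated above.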