diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 9450875dece09d..4e90f06192b146 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -96,13 +96,44 @@ let TargetPrefix = "riscv" in {
                      [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
+  // Used when the destination vector type is NOT the same as the first source vector type.
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryABXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // Used when the destination vector type is NOT the same as the first source vector type (with mask).
+  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+  class RISCVBinaryABXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
+                     llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }
 
   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
   }
+  multiclass RISCVBinaryABX {
+    def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
+  }
 
   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
   defm vrsub : RISCVBinaryAAX;
+
+  defm vwaddu : RISCVBinaryABX;
+  defm vwadd : RISCVBinaryABX;
+  defm vwaddu_w : RISCVBinaryAAX;
+  defm vwadd_w : RISCVBinaryAAX;
+  defm vwsubu : RISCVBinaryABX;
+  defm vwsub : RISCVBinaryABX;
+  defm vwsubu_w : RISCVBinaryAAX;
+  defm vwsub_w : RISCVBinaryAAX;
+
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 468ed00880abd5..bae00bb6cda645 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -36,21 +36,22 @@ def NoX0 : SDNodeXForm
-class LMULInfo<bits<3> lmul, VReg regclass, string mx> {
+class LMULInfo<bits<3> lmul, VReg regclass, VReg wregclass, string mx> {
   bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
   VReg vrclass = regclass;
+  VReg wvrclass = wregclass;
   string MX = mx;
 }
 
 // Associate LMUL with tablegen records of register classes.
-def V_M1 : LMULInfo<0b000, VR, "M1">;
-def V_M2 : LMULInfo<0b001, VRM2, "M2">;
-def V_M4 : LMULInfo<0b010, VRM4, "M4">;
-def V_M8 : LMULInfo<0b011, VRM8, "M8">;
+def V_M1 : LMULInfo<0b000, VR, VRM2, "M1">;
+def V_M2 : LMULInfo<0b001, VRM2, VRM4, "M2">;
+def V_M4 : LMULInfo<0b010, VRM4, VRM8, "M4">;
+def V_M8 : LMULInfo<0b011, VRM8, NoVReg, "M8">;
 
-def V_MF8 : LMULInfo<0b101, VR, "MF8">;
-def V_MF4 : LMULInfo<0b110, VR, "MF4">;
-def V_MF2 : LMULInfo<0b111, VR, "MF2">;
+def V_MF8 : LMULInfo<0b101, VR, VR, "MF8">;
+def V_MF4 : LMULInfo<0b110, VR, VR, "MF4">;
+def V_MF2 : LMULInfo<0b111, VR, VR, "MF2">;
 
 // Used to iterate over all possible LMULs.
def MxList { @@ -101,35 +102,61 @@ class GroupVTypeInfo AllVectors = { defset list AllIntegerVectors = { - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - def : VTypeInfo; - - def : GroupVTypeInfo; - def : GroupVTypeInfo; - def : GroupVTypeInfo; - - def : GroupVTypeInfo; - def : GroupVTypeInfo; - def : GroupVTypeInfo; - - def : GroupVTypeInfo; - def : GroupVTypeInfo; - def : GroupVTypeInfo; - - def : GroupVTypeInfo; - def : GroupVTypeInfo; - def : GroupVTypeInfo; + def VI8MF8: VTypeInfo; + def VI8MF4: VTypeInfo; + def VI8MF2: VTypeInfo; + def VI8M1: VTypeInfo; + def VI16MF4: VTypeInfo; + def VI16MF2: VTypeInfo; + def VI16M1: VTypeInfo; + def VI32MF2: VTypeInfo; + def VI32M1: VTypeInfo; + def VI64M1: VTypeInfo; + + def VI8M2: GroupVTypeInfo; + def VI8M4: GroupVTypeInfo; + def VI8M8: GroupVTypeInfo; + + def VI16M2: GroupVTypeInfo; + def VI16M4: GroupVTypeInfo; + def VI16M8: GroupVTypeInfo; + + def VI32M2: GroupVTypeInfo; + def VI32M4: GroupVTypeInfo; + def VI32M8: GroupVTypeInfo; + + def VI64M2: GroupVTypeInfo; + def VI64M4: GroupVTypeInfo; + def VI64M8: GroupVTypeInfo; } } +class VTypeInfoToWide +{ + VTypeInfo Vti = vti; + VTypeInfo Wti = wti; +} + +defset list AllWidenableIntVectors = { + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; +} + // This class holds the record of the RISCVVPseudoTable below. // This represents the information we need in codegen for each pseudo. // The definition should be consistent with `struct PseudoInfo` in @@ -194,6 +221,16 @@ class GetVRegNoV0 { !eq(1, 1) : VRegClass); } +// Join strings in list using separator and ignoring empty elements +class Join strings, string separator> { + string ret = !foldl(!head(strings), !tail(strings), a, b, + !cond( + !and(!empty(a), !empty(b)) : "", + !empty(a) : b, + !empty(b) : a, + 1 : a#separator#b)); +} + class VPseudo : Pseudo, RISCVVPseudo { let BaseInstr = instr; @@ -202,7 +239,8 @@ class VPseudo : class VPseudoBinaryNoMask : + DAGOperand Op2Class, + string Constraint> : Pseudo<(outs RetClass:$rd), (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>, RISCVVPseudo { @@ -210,6 +248,7 @@ class VPseudoBinaryNoMask : + DAGOperand Op2Class, + string Constraint> : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, Op1Class:$rs2, Op2Class:$rs1, @@ -229,7 +269,7 @@ class VPseudoBinaryMask.ret; let Uses = [VL, VTYPE]; let VLIndex = 5; let SEWIndex = 6; @@ -240,10 +280,13 @@ class VPseudoBinaryMask { + LMULInfo MInfo, + string Constraint = ""> { let VLMul = MInfo.value in { - def "_" # MInfo.MX : VPseudoBinaryNoMask; - def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask; + def "_" # MInfo.MX : VPseudoBinaryNoMask; + def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask; } } @@ -262,6 +305,37 @@ multiclass VPseudoBinaryV_VI { defm _VI : VPseudoBinary; } +// We use earlyclobber here due to +// * The destination EEW is smaller than the source EEW and the overlap is +// in the lowest-numbered part of the source register group is legal. +// Otherwise, it is illegal. 
+// * The destination EEW is greater than the source EEW, the source EMUL is +// at least 1, and the overlap is in the highest-numbered part of the +// destination register group is legal. Otherwise, it is illegal. +multiclass VPseudoBinaryW_VV { + foreach m = MxList.m[0-5] in + defm _VV : VPseudoBinary; +} + +multiclass VPseudoBinaryW_VX { + foreach m = MxList.m[0-5] in + defm _VX : VPseudoBinary; +} + +multiclass VPseudoBinaryW_WV { + foreach m = MxList.m[0-5] in + defm _WV : VPseudoBinary; +} + +multiclass VPseudoBinaryW_WX { + foreach m = MxList.m[0-5] in + defm _WX : VPseudoBinary; +} + multiclass VPseudoBinaryV_VV_VX_VI { defm "" : VPseudoBinaryV_VV; defm "" : VPseudoBinaryV_VX; @@ -278,6 +352,16 @@ multiclass VPseudoBinaryV_VX_VI { defm "" : VPseudoBinaryV_VI; } +multiclass VPseudoBinaryW_VV_VX { + defm "" : VPseudoBinaryW_VV; + defm "" : VPseudoBinaryW_VX; +} + +multiclass VPseudoBinaryW_WV_WX { + defm "" : VPseudoBinaryW_WV; + defm "" : VPseudoBinaryW_WX; +} + //===----------------------------------------------------------------------===// // Helpers to define the different patterns. //===----------------------------------------------------------------------===// @@ -396,6 +480,50 @@ multiclass VPatBinaryV_VI; } +multiclass VPatBinaryW_VV { + foreach VtiToWti = AllWidenableIntVectors in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + defm : VPatBinary; + } +} + +multiclass VPatBinaryW_VX { + foreach VtiToWti = AllWidenableIntVectors in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + defm : VPatBinary; + } +} + +multiclass VPatBinaryW_WV { + foreach VtiToWti = AllWidenableIntVectors in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + defm : VPatBinary; + } +} + +multiclass VPatBinaryW_WX { + foreach VtiToWti = AllWidenableIntVectors in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + defm : VPatBinary; + } +} + multiclass VPatBinaryV_VV_VX_VI vtilist> { @@ -418,6 +546,18 @@ multiclass VPatBinaryV_VX_VI; } +multiclass VPatBinaryW_VV_VX +{ + defm "" : VPatBinaryW_VV; + defm "" : VPatBinaryW_VX; +} + +multiclass VPatBinaryW_WV_WX +{ + defm "" : VPatBinaryW_WV; + defm "" : VPatBinaryW_WX; +} + //===----------------------------------------------------------------------===// // Pseudo instructions and patterns. //===----------------------------------------------------------------------===// @@ -542,6 +682,19 @@ defm PseudoVADD : VPseudoBinaryV_VV_VX_VI; defm PseudoVSUB : VPseudoBinaryV_VV_VX; defm PseudoVRSUB : VPseudoBinaryV_VX_VI; +//===----------------------------------------------------------------------===// +// 12.2. Vector Widening Integer Add/Subtract +//===----------------------------------------------------------------------===// +defm PseudoVWADDU : VPseudoBinaryW_VV_VX; +defm PseudoVWSUBU : VPseudoBinaryW_VV_VX; +defm PseudoVWADD : VPseudoBinaryW_VV_VX; +defm PseudoVWSUB : VPseudoBinaryW_VV_VX; +defm PseudoVWADDU : VPseudoBinaryW_WV_WX; +defm PseudoVWSUBU : VPseudoBinaryW_WV_WX; +defm PseudoVWADD : VPseudoBinaryW_WV_WX; +defm PseudoVWSUB : VPseudoBinaryW_WV_WX; + + //===----------------------------------------------------------------------===// // Patterns. 
//===----------------------------------------------------------------------===// @@ -560,4 +713,16 @@ defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors defm "" : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>; defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>; +//===----------------------------------------------------------------------===// +// 12.2. Vector Widening Integer Add/Subtract +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU">; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU">; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD">; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB">; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU">; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU">; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD">; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB">; + } // Predicates = [HasStdExtV] diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td index b69cdde6c53269..a9575f6cd42c7f 100644 --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -395,6 +395,9 @@ class VReg regTypes, dag regList, int Vlmul> int Size = !mul(Vlmul, 64); // FIXME: assuming ELEN=64 } +// Dummy V register class. +def NoVReg : VReg<[vint8m1_t], (add V0), 0>; + def VR : VReg<[vint8mf2_t, vint8mf4_t, vint8mf8_t, vint16mf2_t, vint16mf4_t, vint32mf2_t, vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t, diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll new file mode 100644 index 00000000000000..9f15de98d5993f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll new file mode 100644 index 00000000000000..a65ea3a5dfae3c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; 
CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu 
+; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8( + 
%0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a 
+} + +declare @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + 
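; An illustrative sketch only, not one of the patch's generated tests: the
; vwadd.vx intrinsic used just above, written with its scalable vector types
; spelled out under LLVM's usual type-name mangling, where nxv2i64 denotes
; <vscale x 2 x i64>. The wrapper function name @example_vwadd_vx is
; hypothetical.
declare <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i64);

define <vscale x 2 x i64> @example_vwadd_vx(<vscale x 2 x i32> %vs2, i32 %rs1, i64 %vl) nounwind {
entry:
  ; Widening signed add: each i32 element of %vs2 and the scalar %rs1 are
  ; sign-extended and added into a double-width i64 element, for %vl elements.
  %vd = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32(
    <vscale x 2 x i32> %vs2,
    i32 %rs1,
    i64 %vl)
  ret <vscale x 2 x i64> %vd
}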
+declare @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll new file mode 100644 index 00000000000000..ee29f5631648a4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + , + , + i32); + +define 
@intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( + , + , + i32); + +define @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i16.i8( + , + i8, + i32); + +define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i16.i8( + , + i8, + i32); + +define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i16.i8( + , + i8, + i32); + +define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i16.i8( + , + i8, + i32); + +define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i16.i8( + , + i8, + i32); + +define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv32i16.i8( + , + i8, + i32); + +define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv32i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv32i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i32.i16( + , + i16, + i32); + +define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i32.i16( + %0, + 
i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i32.i16( + , + i16, + i32); + +define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i32.i16( + , + i16, + i32); + +define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i32.i16( + , + i16, + i32); + +define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i32.i16( + , + i16, + i32); + +define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i32.i16( 
+ , + , + i16, + , + i32); + +define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll new file mode 100644 index 00000000000000..828d32506a213f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( + , + , + i64); + +define @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i16.i8( + , + i8, + i64); + +define @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i16.i8( + , + i8, + i64); + +define @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i16.i8( + , + i8, + i64); + +define @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i16.i8( + , + i8, + i64); + +define @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i16.i8( + , + i8, + i64); + +define @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i16.i8( 
+ %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv32i16.i8( + , + i8, + i64); + +define @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv32i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv32i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i32.i16( + , + i16, + i64); + +define @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i32.i16( + , + i16, + i64); + +define @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i32.i16( + , + i16, + i64); + +define @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i32.i16( + , 
+ , + i16, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i32.i16( + , + i16, + i64); + +define @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv16i32.i16( + , + i16, + i64); + +define @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv16i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv16i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv1i64.i32( + , + i32, + i64); + +define @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv1i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv1i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv2i64.i32( + , + i32, + i64); + +define @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv2i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv2i64.i32( + , + , + i32, + , + i64); + +define 
@intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv4i64.i32( + , + i32, + i64); + +define @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv4i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv4i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwadd.w.nxv8i64.i32( + , + i32, + i64); + +define @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwadd.w.nxv8i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwadd.w.mask.nxv8i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll new file mode 100644 index 00000000000000..2525172977968c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: 
+; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define 
@intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + , + i8, + i32); 
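The riscv32 variants of these tests differ from the riscv64 ones only in the vl argument type, which is i32 instead of i64. A minimal fully typed sketch of the unmasked vector-vector form (types inferred from the nxv1i16.nxv1i8.nxv1i8 suffix, FileCheck directives omitted, not copied verbatim from the patch) for the first case in this file:

declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,    ; first narrow vector source
  <vscale x 1 x i8>,    ; second narrow vector source
  i32);                 ; vl, which is i32 on riscv32

define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)
  ret <vscale x 1 x i16> %a
}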
+ +define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define 
@intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + , + 
i16, + i32); + +define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll new file mode 100644 index 00000000000000..631dade5b8e1ae --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, 
%3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define 
@intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + 
i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + 
ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call 
@llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll new file mode 100644 index 00000000000000..ae5241d60dae3b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; 
RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + 
+declare @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + 
+ ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( + , + , + i32); + +define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i16.i8( + , + i8, + i32); + +define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + 
+declare @llvm.riscv.vwaddu.w.nxv2i16.i8( + , + i8, + i32); + +define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i16.i8( + , + i8, + i32); + +define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i16.i8( + , + i8, + i32); + +define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv16i16.i8( + , + i8, + i32); + +define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv32i16.i8( + , + i8, + i32); + +define 
@intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv32i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i32.i16( + , + i16, + i32); + +define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i32.i16( + , + i16, + i32); + +define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i32.i16( + , + i16, + i32); + +define @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i32.i16( + , + i16, + i32); + +define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( %0, 
i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv16i32.i16( + , + i16, + i32); + +define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll new file mode 100644 index 00000000000000..e260842cab8ab5 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwaddu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( + , + , + i64); + +define @intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i16.i8( + , + i8, + i64); + +define @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i16.i8( + , + i8, + i64); + +define @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i16.i8( + , + i8, + i64); + +define @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i16.i8( + , + i8, + i64); + +define @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv16i16.i8( + , + i8, + i64); + +define @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv32i16.i8( + , + i8, + i64); + +define @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv32i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i32.i16( + , + i16, + i64); + +define @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, 
{{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i32.i16( + , + i16, + i64); + +define @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i32.i16( + , + i16, + i64); + +define @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i32.i16( + , + i16, + i64); + +define @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv16i32.i16( + , + i16, + i64); + +define @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv16i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv16i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwaddu.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv1i64.i32( + , + i32, + i64); + +define @intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv1i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv1i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv2i64.i32( + , + i32, + i64); + +define @intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv2i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv2i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv4i64.i32( + , + i32, + i64); + +define @intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv4i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv4i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.nxv8i64.i32( + , + i32, + i64); + +define @intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwaddu.w.nxv8i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwaddu.w.mask.nxv8i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32( + %0, + 
%1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll new file mode 100644 index 00000000000000..f5884729b06f9f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define 
@intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , 
+ , + i32); + +define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define 
@intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define 
@intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll new file mode 100644 index 00000000000000..8ba2e2c14d841b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define 
@intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 
%2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll new file mode 100644 index 00000000000000..47504572a48924 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + , + , + i32); + +define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i16.i8( + , + i8, + i32); + +define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i16.i8( + , + i8, + i32); + +define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i16.i8( + , + i8, + i32); + +define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i16.i8( + , + i8, + i32); + +define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i16.i8( + , + i8, + 
i32); + +define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv32i16.i8( + , + i8, + i32); + +define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv32i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv32i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i32.i16( + , + i16, + i32); + +define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i32.i16( + , + i16, + i32); + +define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i32.i16( + , + i16, + i32); + +define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i32.i16( + , + i16, + i32); + +define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i32.i16( + , + i16, + i32); + +define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll new file mode 100644 index 00000000000000..8eed85f92ad573 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + , + , + i64); + +define @intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i16.i8( + , + i8, + i64); + +define @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i16.i8( + , + i8, + i64); + +define @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + 
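As a sketch with the scalable-vector types written out (they are implied by the intrinsic name suffixes and are assumed here, not copied verbatim from the patch), the masked wx test that closes just above reads roughly as follows.

declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i16> %a
}

The same shape, a result and merge operand of the wide type, a wide-vector first source, a scalar second source, an <vscale x N x i1> mask, and an XLEN-wide vl, repeats for every vwsub.w and vwsubu test in these files.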
+declare @llvm.riscv.vwsub.w.nxv4i16.i8( + , + i8, + i64); + +define @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i16.i8( + , + i8, + i64); + +define @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i16.i8( + , + i8, + i64); + +define @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv32i16.i8( + , + i8, + i64); + +define @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv32i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv32i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i32.i16( + , + i16, + i64); + +define @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, 
i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i32.i16( + , + i16, + i64); + +define @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i32.i16( + , + i16, + i64); + +define @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i32.i16( + , + i16, + i64); + +define @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv16i32.i16( + , + i16, + i64); + +define @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv16i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv16i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv1i64.i32( + , + i32, + i64); + +define @intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv1i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv1i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv2i64.i32( + , + i32, + i64); + +define @intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv2i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv2i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv4i64.i32( + , + i32, + i64); + +define @intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv4i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv4i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsub.w.nxv8i64.i32( + , + i32, + i64); + +define @intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsub.w.nxv8i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsub.w.mask.nxv8i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll new file mode 100644 index 00000000000000..9ecf4e8dd3ccc4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + , + , + i32); + +define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll new file mode 100644 index 00000000000000..221bc10b3898e4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vv 
{{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i64_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, i64 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i64_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i64_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i64_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8( %0, 
i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i64_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i64_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32( %0, i32 
%1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i64_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i64_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll new file mode 100644 index 00000000000000..4c35bec0258cd4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( + , + , + i32); + +define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i16.i8( + , + i8, + i32); + +define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i16.i8( + , + i8, + i32); + +define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i16.i8( + , + i8, + i32); + +define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i16.i8( + , + i8, + i32); + +define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i16.i8( + , + i8, + i32); + +define @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv32i16.i8( + , + i8, + i32); + +define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv32i16.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( + , + , + i8, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i32.i16( + , + i16, + i32); + +define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i32.i16( + , + i16, + i32); + +define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i32.i16( + , + i16, + i32); + +define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i32.i16( + , + i16, + i32); + +define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i32.i16( + , + i16, + i32); + +define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i32.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( + , + , + i16, + , + i32); + +define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = 
call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll new file mode 100644 index 00000000000000..41d8e55fb15dc9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + 
+declare @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32( + %0, + %1, + i64 %2) + + 
ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( + , + , + i64); + +define @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i16.i8( + , + i8, + i64); + +define @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vwsubu.w.mask.nxv1i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i16.i8( + , + i8, + i64); + +define @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i16.i8( + , + i8, + i64); + +define @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i16.i8( + , + i8, + i64); + +define @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i16.i8( + , + i8, + i64); + +define @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( + , + , + i8, + , + i64); + +define 
@intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv32i16.i8( + , + i8, + i64); + +define @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv32i16.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( + , + , + i8, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i32.i16( + , + i16, + i64); + +define @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i32.i16( + , + i16, + i64); + +define @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i32.i16( + , + i16, + i64); + +define @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16( 
%0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i32.i16( + , + i16, + i64); + +define @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv16i32.i16( + , + i16, + i64); + +define @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv16i32.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( + , + , + i16, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv1i64.i32( + , + i32, + i64); + +define @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv1i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv1i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv2i64.i32( + , + i32, + i64); + +define @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv2i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv2i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind 
{ +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv4i64.i32( + , + i32, + i64); + +define @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv4i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv4i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.nxv8i64.i32( + , + i32, + i64); + +define @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vwsubu.w.nxv8i64.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vwsubu.w.mask.nxv8i64.i32( + , + , + i32, + , + i64); + +define @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +}